Example #1
 def build_graph(parameters):
     """Build the graph for parse_example tests."""
     feature_dtype = parameters["feature_dtype"]
     feature_shape = parameters["feature_shape"]
     is_dense = parameters["is_dense"]
     input_value = tf.compat.v1.placeholder(dtype=tf.string,
                                            name="input",
                                            shape=[1])
     if is_dense:
         feature_default_value = np.zeros(shape=feature_shape)
         if feature_dtype == tf.string:
             feature_default_value = np.array(["missing"] *
                                              feature_shape[0])
         features = {
             "x":
             tf.FixedLenFeature(shape=feature_shape,
                                dtype=feature_dtype,
                                default_value=feature_default_value)
         }
     else:  # Sparse
         features = {"x": tf.VarLenFeature(dtype=feature_dtype)}
     out = tf.parse_example(input_value, features)
     output_tensor = out["x"]
     if not is_dense:
         output_tensor = out["x"].values
     return [input_value], [output_tensor]
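
A minimal driving sketch for the graph above, assuming TensorFlow 1.x behavior (here via tensorflow.compat.v1); the parameter values are chosen for illustration only:

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

# One serialized tf.Example with a dense float feature "x" of shape [2].
example = tf.train.Example(features=tf.train.Features(feature={
    "x": tf.train.Feature(float_list=tf.train.FloatList(value=[1.0, 2.0])),
}))

inputs, outputs = build_graph({"feature_dtype": tf.float32,
                               "feature_shape": [2],
                               "is_dense": True})
with tf.Session() as sess:
    print(sess.run(outputs[0],
                   feed_dict={inputs[0]: [example.SerializeToString()]}))
    # -> [[1. 2.]]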
Example #2
    def example_reading_spec(self):
        data_fields, data_items_to_decoders = (super(
            ImageVqav2Tokens10kLabels3k, self).example_reading_spec())
        data_fields["image/image_id"] = tf.FixedLenFeature((), tf.int64)
        data_fields["image/question_id"] = tf.FixedLenFeature((), tf.int64)
        data_fields["image/question"] = tf.FixedLenSequenceFeature(
            (), tf.int64, allow_missing=True)
        data_fields["image/answer"] = tf.FixedLenSequenceFeature(
            (), tf.int64, allow_missing=True)

        slim = contrib.slim()
        data_items_to_decoders["question"] = slim.tfexample_decoder.Tensor(
            "image/question")
        data_items_to_decoders["targets"] = slim.tfexample_decoder.Tensor(
            "image/answer")
        return data_fields, data_items_to_decoders
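
A side note on the two feature types used above: tf.FixedLenFeature((), tf.int64) parses exactly one scalar per example, while tf.FixedLenSequenceFeature((), tf.int64, allow_missing=True) parses a variable-length 1-D list. A small hedged illustration (the feature names and serialized_example are hypothetical):

features = {
    "id": tf.FixedLenFeature((), tf.int64),  # exactly one int64 per example
    "tokens": tf.FixedLenSequenceFeature(    # zero or more int64 values,
        (), tf.int64, allow_missing=True),   # padded when batched
}
parsed = tf.parse_single_example(serialized_example, features)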
Example #3
def parse_tf_example(example_proto):
    """Converts a tf.Example proto to a dict of Tensors.

    Args:
      example_proto: A raw tf.Example proto.

    Returns:
      A dict of Tensors with fields structure, reward, and batch_index.
    """
    feature_description = {
        'structure': tf.FixedLenSequenceFeature((), tf.int64,
                                                allow_missing=True),
        'reward': tf.FixedLenFeature([1], tf.float32),
        'batch_index': tf.FixedLenFeature([1], tf.int64),
    }

    return tf.io.parse_single_example(serialized=example_proto,
                                      features=feature_description)
Example #4
def parse_mnist_tfrecord(serialized_example: tf.Tensor) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]:
    """
    Parse a TFRecord representing a single MNIST data point into an input
    feature tensor and a label tensor.

    Returns: (features: Dict[str, Tensor], label: Tensor)
    """
    raw = tf.parse_example(
        serialized=serialized_example, features={"image_raw": tf.FixedLenFeature([], tf.string)}
    )
    image = tf.decode_raw(raw["image_raw"], tf.float32)

    label_dict = tf.parse_example(
        serialized=serialized_example, features={"label": tf.FixedLenFeature(1, tf.int64)}
    )
    return {"image": image}, label_dict["label"]
Example #5
def input_fn_builder(input_file, seq_length, is_training, drop_remainder):
    """Creates an `input_fn` closure to be passed to TPUEstimator."""

    name_to_features = {
        "unique_ids": tf.FixedLenFeature([], tf.int64),
        "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
        "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
        "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
    }

    if is_training:
        name_to_features["label_ids"] = tf.FixedLenFeature([], tf.int64)

    def _decode_record(record, name_to_features):
        """Decodes a record to a TensorFlow example."""
        example = tf.parse_single_example(record, name_to_features)

        # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
        # So cast all int64 to int32.
        for name in list(example.keys()):
            t = example[name]
            if t.dtype == tf.int64:
                t = tf.to_int32(t)
            example[name] = t

        return example

    def input_fn(params):
        """The actual input function."""
        batch_size = params["batch_size"]

        # For training, we want a lot of parallel reading and shuffling.
        # For eval, we want no shuffling and parallel reading doesn't matter.
        d = tf.data.TFRecordDataset(input_file)
        if is_training:
            d = d.repeat()
            d = d.shuffle(buffer_size=100)

        d = d.apply(
            contrib_data.map_and_batch(
                lambda record: _decode_record(record, name_to_features),
                batch_size=batch_size,
                drop_remainder=drop_remainder))

        return d

    return input_fn
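
contrib_data.map_and_batch is a fused op from tf.contrib.data; on later TF 1.x releases, a hedged equivalent without contrib would replace the d.apply(...) call with a plain map followed by batch:

# Equivalent pipeline without tf.contrib (sketch):
d = d.map(lambda record: _decode_record(record, name_to_features))
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)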
Example #6
    def parse_and_preprocess(self, example_proto):
        """
        Returns:
            image: a float tensor with shape [height, width, 3],
                an RGB image with pixel values in the range [0, 1].
            boxes: a float tensor with shape [num_boxes, 4].
            num_boxes: an int tensor with shape [].
        """
        features = {
            'image': tf.FixedLenFeature([], tf.string),
            'num_persons': tf.FixedLenFeature([], tf.int64),
            'boxes': tf.FixedLenSequenceFeature([],
                                                tf.float32,
                                                allow_missing=True)
        }
        parsed_features = tf.parse_single_example(example_proto, features)

        # get an image
        image = tf.image.decode_jpeg(parsed_features['image'], channels=3)
        image = tf.image.convert_image_dtype(image, tf.float32)
        # now pixel values are scaled to the [0, 1] range

        # get number of people on the image
        num_boxes = tf.to_int32(parsed_features['num_persons'])
        # it is assumed that num_boxes > 0

        # get groundtruth boxes, they are in absolute coordinates
        boxes = tf.reshape(parsed_features['boxes'], [num_boxes, 4])

        # to the [0, 1] range
        height, width = tf.shape(image)[0], tf.shape(image)[1]
        scaler = tf.to_float(tf.stack([height, width, height, width]))
        boxes /= scaler

        if self.is_training:
            image, boxes = augmentation(image, boxes, self.image_size)
        else:
            image, boxes = resize_keeping_aspect_ratio(image, boxes,
                                                       self.min_dimension,
                                                       DIVISOR)

        # it could change after augmentations
        num_boxes = tf.shape(boxes)[0]

        features = {'images': image}
        labels = {'boxes': boxes, 'num_boxes': num_boxes}
        return features, labels
Example #7
 def __init__(self,
              feature_key_map,
              tfrecordreader_type,
              batch_size,
              num_threads,
              device_id,
              data_dir,
              crop,
              rali_cpu=True):
     super(HybridPipe, self).__init__(batch_size,
                                      num_threads,
                                      device_id,
                                      seed=12 + device_id,
                                      rali_cpu=rali_cpu)
     self.input = ops.TFRecordReader(path=data_dir,
                                     index_path="",
                                     reader_type=tfrecordreader_type,
                                     user_feature_key_map=feature_key_map,
                                     features={
                                         'image/encoded':
                                         tf.FixedLenFeature((), tf.string,
                                                            ""),
                                         'image/class/label':
                                         tf.FixedLenFeature([1], tf.int64,
                                                            -1),
                                         'image/filename':
                                         tf.FixedLenFeature((), tf.string,
                                                            "")
                                     })
     rali_device = 'cpu' if rali_cpu else 'gpu'
     decoder_device = 'cpu' if rali_cpu else 'mixed'
     self.decode = ops.ImageDecoder(user_feature_key_map=feature_key_map,
                                    device=decoder_device,
                                    output_type=types.RGB)
     self.res = ops.Resize(device=rali_device,
                           resize_x=crop[0],
                           resize_y=crop[1])
     self.cmnp = ops.CropMirrorNormalize(device="cpu",
                                         output_dtype=types.FLOAT,
                                         output_layout=types.NCHW,
                                         crop=crop,
                                         image_type=types.RGB,
                                         mean=[0, 0, 0],
                                         std=[255, 255, 255])
     self.coin = ops.CoinFlip(probability=0.5)
     print('rali "{0}" variant'.format(rali_device))
Example #8
    def __init__(self, in_file, tokenizer, subject_mention_probability,
                 max_qry_length, is_training, entity2id, tfrecord_filename):
        """Initialize dataset."""
        del subject_mention_probability

        self.gt_file = in_file
        self.max_qry_length = max_qry_length
        self.is_training = is_training

        # Read examples from JSON file.
        self.examples = self.read_examples(in_file, entity2id)
        self.num_examples = len(self.examples)

        if is_training:
            # Pre-shuffle the input to avoid having to make a very large shuffle
            # buffer in the `input_fn`.
            rng = random.Random(12345)
            rng.shuffle(self.examples)

        # Write to TFRecords file.
        writer = FeatureWriter(filename=tfrecord_filename,
                               is_training=self.is_training,
                               has_bridge=False)
        convert_examples_to_features(examples=self.examples,
                                     tokenizer=tokenizer,
                                     max_query_length=self.max_qry_length,
                                     entity2id=entity2id,
                                     output_fn=writer.process_feature)
        writer.close()

        # Create input_fn.
        names_to_features = {
            "qas_ids": tf.FixedLenFeature([], tf.string),
            "qry_input_ids": tf.FixedLenFeature([self.max_qry_length],
                                                tf.int64),
            "qry_input_mask": tf.FixedLenFeature([self.max_qry_length],
                                                 tf.int64),
            "qry_entity_id": tf.VarLenFeature(tf.int64),
        }
        if is_training:
            names_to_features["answer_entities"] = tf.VarLenFeature(tf.int64)
            names_to_features["exclude_set"] = tf.VarLenFeature(tf.int64)
        self.input_fn = input_fn_builder(input_file=tfrecord_filename,
                                         is_training=self.is_training,
                                         drop_remainder=False,
                                         names_to_features=names_to_features)
Example #9
def serving_input_receiver_fn():
    feature_map = {"fids": tf.VarLenFeature(tf.int64)}
    feature_map["example_id"] = tf.FixedLenFeature([], tf.string)

    record_batch = tf.placeholder(dtype=tf.string, name='examples')
    features = tf.parse_example(record_batch, features=feature_map)
    return tf.estimator.export.ServingInputReceiver(
        features, {'examples': record_batch})
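
For context, a receiver function like this is typically handed to an Estimator at export time; the estimator instance and export directory below are hypothetical:

# Hypothetical export of a trained tf.estimator.Estimator.
estimator.export_saved_model("export_dir", serving_input_receiver_fn)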
Example #10
 def read_fn(example):
     features = {
         "image": tf.FixedLenFeature([], tf.string),
     }
     example = tf.parse_single_example(example, features)
     image = decode_img(example["image"], params["dataset"]["image_size"],
                        params["n_channels"])
     return image, image  # return the image twice: the pipeline expects an (inputs, targets) pair
Example #11
def read_tfRecord(tfRecord_path):
    filename_queue = tf.train.string_input_producer([tfRecord_path],
                                                    shuffle=True)
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'label':
                                           tf.FixedLenFeature([10], tf.int64),
                                           'img_raw':
                                           tf.FixedLenFeature([], tf.string)
                                       })
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    img = tf.reshape(img, [32, 32, 3])
    img = tf.cast(img, tf.float32) * (1.0 / 255)
    label = tf.cast(features['label'], tf.float32)
    return img, label
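
The queue-based reader above is TF1 legacy; a hedged sketch of the same parsing logic on the tf.data API (the function name and shuffle buffer are chosen for illustration):

def dataset_from_tfrecord(tfRecord_path):
    def _parse(serialized_example):
        features = tf.parse_single_example(serialized_example, features={
            'label': tf.FixedLenFeature([10], tf.int64),
            'img_raw': tf.FixedLenFeature([], tf.string),
        })
        img = tf.decode_raw(features['img_raw'], tf.uint8)
        img = tf.reshape(img, [32, 32, 3])
        img = tf.cast(img, tf.float32) * (1.0 / 255)
        label = tf.cast(features['label'], tf.float32)
        return img, label
    return tf.data.TFRecordDataset([tfRecord_path]).shuffle(1024).map(_parse)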
Example #12
 def example_reading_spec(self):
   label_key = "image/class/label"
   data_fields, data_items_to_decoders = (
       super(Video2ClassProblem, self).example_reading_spec())
   data_fields[label_key] = tf.FixedLenFeature((1,), tf.int64)
   data_items_to_decoders["targets"] = contrib.slim().tfexample_decoder.Tensor(
       label_key)
   return data_fields, data_items_to_decoders
Example #13
def get_roomsim_spec(num_sources, num_receivers, num_samples):
    """Returns a specification of features in tf.Examples in roomsim format.

  Args:
    num_sources: Expected number of sources.
    num_receivers: Number of microphones in array.
    num_samples: Expected length of sources in samples. 'None' for variable.

  Returns:
    Feature specifications suitable to pass to tf.parse_example.
  """
    spec = {}
    spec[Features.RECEIVER_AUDIO] = tf.FixedLenFeature(
        [num_receivers, num_samples], tf.float32)
    spec[Features.SOURCE_IMAGES] = tf.FixedLenFeature(
        [num_sources, num_receivers, num_samples], tf.float32)
    return spec
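
Hypothetical usage of the spec, assuming a batch of serialized examples named serialized_batch and the shapes chosen below for illustration:

spec = get_roomsim_spec(num_sources=2, num_receivers=4, num_samples=16000)
parsed = tf.parse_example(serialized_batch, features=spec)
receiver_audio = parsed[Features.RECEIVER_AUDIO]  # shape [batch, 4, 16000]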
Example #14
    def parse_function(self, data):

        features = tf.parse_single_example(
            data,
            features={
                'image': tf.FixedLenFeature([], tf.string),
                'caption': tf.FixedLenFeature([self.config.max_length],
                                              tf.int64)
            })

        image = self.image_decode(features['image'])
        image = image_processing.image_processing(image, self.config.img_size,
                                                  self.is_training)
        caption = self.caption_processing(features['caption'],
                                          self.is_training)

        return image, caption
Example #15
 def parser(serialized_example):
     """Parses a single tf.Example into image and label tensors."""
     features = tf.parse_single_example(serialized_example,
                                        features={
                                            "image":
                                            tf.FixedLenFeature([],
                                                               tf.string),
                                            "label":
                                            tf.FixedLenFeature([],
                                                               tf.int64),
                                        })
     image = tf.decode_raw(features["image"], tf.uint8)
     image.set_shape([3 * 32 * 32])
     image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
     image = tf.transpose(tf.reshape(image, [3, 32, 32]))
     label = tf.cast(features["label"], tf.int32)
     return image, label
Example #16
def parser(serialized_example):
    """Parses a single Example into image and label tensors."""
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64)  # label is unused
        })
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image.set_shape([28 * 28])
    image = tf.reshape(image, [28, 28, 1])

    # Normalize the values of the image from [0, 255] to [-1.0, 1.0]
    image = tf.cast(image, tf.float32) * (2.0 / 255) - 1.0

    label = tf.cast(tf.reshape(features['label'], shape=[]), dtype=tf.int32)
    return image, label
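
Typical wiring for such a parser in a TF1 input pipeline (a sketch; filenames and the batch size are hypothetical):

dataset = (tf.data.TFRecordDataset(filenames)
           .map(parser)
           .batch(32))
images, labels = dataset.make_one_shot_iterator().get_next()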
Example #17
        def _parse_nsynth(record):
            """Parsing function for NSynth dataset."""
            features = {
                'pitch': tf.FixedLenFeature([1], dtype=tf.int64),
                'audio': tf.FixedLenFeature([length], dtype=tf.float32),
                'qualities': tf.FixedLenFeature([10], dtype=tf.int64),
                'instrument_source': tf.FixedLenFeature([1], dtype=tf.int64),
                'instrument_family': tf.FixedLenFeature([1], dtype=tf.int64),
            }

            example = tf.parse_single_example(record, features)
            wave, label = example['audio'], example['pitch']
            wave = spectral_ops.crop_or_pad(wave[tf.newaxis, :, tf.newaxis],
                                            length, channels)[0]
            one_hot_label = tf.one_hot(label_index_table.lookup(label),
                                       depth=len(pitches))[0]
            return wave, one_hot_label, label, example['instrument_source']
Example #18
 def __init__(self, tfrecord_files, img_shape, labeled=False):
   super(Dataset, self).__init__()
   self._tfrecords = nest.flatten(tfrecord_files)
   self._img_shape = img_shape
   tf.logging.info(tfrecord_files)
   tf.logging.info(labeled)
   if labeled:
     self._feature_description['labeled'] = tf.FixedLenFeature([], tf.int64)
Example #19
 def _parse_tfrecord(serialized_example):
     fs = tf.parse_single_example(
         serialized_example,
         features={
             'point':
             tf.FixedLenFeature([n_frames * n_points * n_dims],
                                tf.float32),
             'label':
             tf.FixedLenFeature([n_frames * n_points * 1], tf.float32),
             'vert':
             tf.FixedLenFeature([n_frames * n_vert * n_dims],
                                tf.float32),
             'weight':
             tf.FixedLenFeature([n_frames * n_vert * n_parts],
                                tf.float32),
             'transform':
             tf.FixedLenFeature(
                 [n_frames * n_parts * (n_dims + 1) * (n_dims + 1)],
                 tf.float32),
             'joint':
             tf.FixedLenFeature([n_frames * n_parts * n_dims],
                                tf.float32),
             'name':
             tf.FixedLenFeature([], tf.string),
         })
     fs['point'] = tf.reshape(fs['point'], [n_frames, n_points, n_dims])
     fs['label'] = tf.reshape(fs['label'], [n_frames, n_points, 1])
     fs['vert'] = tf.reshape(fs['vert'], [n_frames, n_vert, n_dims])
     fs['weight'] = tf.reshape(fs['weight'],
                               [n_frames, n_vert, n_parts])
     fs['transform'] = tf.reshape(
         fs['transform'], [n_frames, n_parts, n_dims + 1, n_dims + 1])
     fs['joint'] = tf.reshape(fs['joint'], [n_frames, n_parts, n_dims])
     return fs
Example #20
def input_fn_builder(tfrecord_file,
                     seq_length,
                     is_training,
                     batch_size,
                     drop_remainder=True):
    """Creates an `input_fn` closure to be passed to Estimator."""
    logging.info(
        "Creating input fun with batch_size: {} and drop remainder: {}".format(
            str(batch_size), str(drop_remainder)))
    name_to_features = {
        "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
        "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
        "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
        "score": tf.FixedLenFeature([], tf.float32)
    }

    def _decode_record(record, name_to_features):
        """Decodes a record to a TensorFlow example."""
        example = tf.parse_single_example(record, name_to_features)
        # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
        # So cast all int64 to int32.
        for name in list(example.keys()):
            t = example[name]
            if t.dtype == tf.int64:
                t = tf.to_int32(t)
            example[name] = t
        return example

    def input_fn(params):  # pylint: disable=unused-argument
        """Acutal data generator."""
        tfrecord_file_expanded = tf.io.gfile.glob(tfrecord_file)
        n_files = len(tfrecord_file_expanded)
        if n_files > 1:
            logging.info("Found {} files matching {}".format(
                str(n_files), tfrecord_file))

        d = tf.data.TFRecordDataset(tfrecord_file_expanded)
        if is_training:
            d = d.repeat()
            d = d.shuffle(buffer_size=FLAGS.shuffle_buffer_size)
        d = d.map(lambda record: _decode_record(record, name_to_features))
        d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
        return d

    return input_fn
Example #21
 def __init__(self):
     """Constructor sets keys_to_features and items_to_handlers."""
     self.keys_to_features = {
         'image/encoded':
         tf.FixedLenFeature((), tf.string, default_value=''),
         'image/filename':
         tf.FixedLenFeature((), tf.string, default_value=''),
         'image/format':
         tf.FixedLenFeature((), tf.string, default_value='jpeg'),
         'image/height':
         tf.FixedLenFeature((), tf.int64, default_value=0),
         'image/width':
         tf.FixedLenFeature((), tf.int64, default_value=0),
         'image/segmentation/class/encoded':
         tf.FixedLenFeature((), tf.string, default_value=''),
         'image/segmentation/class/format':
         tf.FixedLenFeature((), tf.string, default_value='png'),
     }
     self.items_to_handlers = {
         'image':
         slim_example_decoder.Image(image_key='image/encoded',
                                    format_key='image/format',
                                    channels=3),
         'labels_class':
         slim_example_decoder.Image(
             image_key='image/segmentation/class/encoded',
             format_key='image/segmentation/class/format',
             channels=1)
     }
Example #22
  def get_example(self, batch_size):
    """Get a single example from the tfrecord file.

    Args:
      batch_size: Int, minibatch size.

    Returns:
      tf.Example protobuf parsed from tfrecord.
    """
    reader = tf.TFRecordReader()
    num_epochs = None if self.is_training else 1
    capacity = batch_size
    path_queue = tf.train.input_producer(
        [self.record_path],
        num_epochs=num_epochs,
        shuffle=self.is_training,
        capacity=capacity)
    unused_key, serialized_example = reader.read(path_queue)
    features = {
        "note_str": tf.FixedLenFeature([], dtype=tf.string),
        "pitch": tf.FixedLenFeature([1], dtype=tf.int64),
        "velocity": tf.FixedLenFeature([1], dtype=tf.int64),
        "audio": tf.FixedLenFeature([64000], dtype=tf.float32),
        "qualities": tf.FixedLenFeature([10], dtype=tf.int64),
        "instrument_source": tf.FixedLenFeature([1], dtype=tf.int64),
        "instrument_family": tf.FixedLenFeature([1], dtype=tf.int64),
    }
    example = tf.parse_single_example(serialized_example, features)
    return example
Example #23
 def parse_fn(example):
     feature_map = {}
     feature_map["example_id"] = tf.FixedLenFeature([], tf.string)
     feature_map['fids'] = tf.VarLenFeature(tf.int64)
     # feature_map['y'] = tf.FixedLenFeature([], tf.int64)
     features = tf.parse_example(example, features=feature_map)
     # labels = {'y': features.pop('y')}
     labels = {'y': tf.constant(0)}
     return features, labels
Example #24
  def feature_loading_spec(self):
    """Returns a dictionary with information needed to deserialize tf.Examples.

    Returns:
      A dictionary mapping feature names to example-reading specs.
    """
    return {
        self.key(): tf.FixedLenFeature(self._shape, self._dtype)
    }
Example #25
def decode(serialized_example):
    feature = {
        'image_raw': tf.FixedLenFeature([], tf.string),
        'label': tf.VarLenFeature(tf.int64)
    }
    features = tf.parse_single_example(serialized_example, features=feature)
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    label = features['label'].values
    return image, label
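
Since 'label' is declared with tf.VarLenFeature, parse_single_example returns it as a tf.SparseTensor; .values above keeps only the stored values. If a padded dense tensor is preferred instead, a one-line sketch (tf.sparse.to_dense on TF >= 1.13; older releases spell it tf.sparse_tensor_to_dense):

label_dense = tf.sparse.to_dense(features['label'], default_value=0)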
Example #26
def parse_example(serialized_example):
    """Parse example."""
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           "question":
                                           tf.FixedLenFeature([], tf.string),
                                           "context":
                                           tf.FixedLenFeature([], tf.string),
                                           "answer_start":
                                           tf.FixedLenFeature([], tf.int64),
                                           "answer_end":
                                           tf.FixedLenFeature([], tf.int64),
                                       })
    features["question"] = split_on_whitespace(features["question"])
    features["context"] = split_on_whitespace(features["context"])
    features["answer_start"] = tf.to_int32(features["answer_start"])
    features["answer_end"] = tf.to_int32(features["answer_end"])
    return features
Example #27
def input_fn(bridge, trainer_master=None):
    dataset = flt.data.DataBlockLoader(
        args.batch_size, ROLE, bridge, trainer_master)
    feature_map = {"fids": tf.VarLenFeature(tf.int64)}
    feature_map["example_id"] = tf.FixedLenFeature([], tf.string)

    record_batch = dataset.make_batch_iterator().get_next()
    features = tf.parse_example(record_batch, features=feature_map)
    return features, None
Example #28
 def parse_fn(example):
     feature_map = {
         "x_{0}".format(i): tf.VarLenFeature(tf.int64)
         for i in range(512)
     }
     feature_map["example_id"] = tf.FixedLenFeature([], tf.string)
     features = tf.parse_example(example, features=feature_map)
     labels = {}
     return features, labels
Example #29
    def train(self):
        # input tensor
        inputs = tf.placeholder('float', [None, 224, 224, 3])
        # target tensor
        outputs = tf.placeholder('float', [None, 224, 224, 3])
        # encoded: the image after the encoder; decoded: the image after the
        # decoder; decoded_encoded: the reconstruction passed through the
        # encoder again
        encoded, decoded, decoded_encoded = self.encoder_decoder(inputs)
        # content loss between the reconstruction and the original
        # (outputs, decoded)
        pixel_loss = tf.losses.mean_squared_error(decoded, outputs)
        # feature loss between the encoded original (encoded) and the encoded
        # reconstruction (decoded_encoded)
        feature_loss = tf.losses.mean_squared_error(decoded_encoded, encoded)
        # loss = pixel_loss + feature_loss
        loss = 0.5 * pixel_loss + 0.1 * feature_loss
        opt = tf.train.AdamOptimizer(0.0001).minimize(loss)
        # location of the training set
        tfrecords_filename = self.tfrecord_path
        filename_queue = tf.train.string_input_producer([tfrecords_filename],
                                                        num_epochs=100)

        reader = tf.TFRecordReader()
        _, serialized_example = reader.read(filename_queue)

        feature2 = {'image_raw': tf.FixedLenFeature([], tf.string)}
        features = tf.parse_single_example(serialized_example,
                                           features=feature2)
        image = tf.decode_raw(features['image_raw'], tf.uint8)
        image = tf.reshape(image, [224, 224, 3])
        images = tf.train.shuffle_batch([image],
                                        batch_size=self.batch_size,
                                        capacity=30,
                                        min_after_dequeue=10)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            tf.global_variables_initializer().run()
            tf.local_variables_initializer().run()

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)

            saver = tf.train.Saver()

            for i in range(self.max_iterator):
                batch_x = sess.run(images)
                feed_dict = {inputs: batch_x, outputs: batch_x}

                _, p_loss, f_loss, reconstruct_imgs = sess.run(
                    [opt, pixel_loss, feature_loss, decoded],
                    feed_dict=feed_dict)

                print('step %d | pixel_loss is %f | feature_loss is %f |' %
                      (i, p_loss, f_loss))

                if i % 5 == 0:
                    result_img = np.clip(reconstruct_imgs[0], 0,
                                         255).astype(np.uint8)
                    imsave('result.jpg', result_img)

            saver.save(sess, self.checkpoint_path)
            coord.request_stop()
            coord.join(threads)
Example #30
def feature_descriptions(max_num_entities):
    """Create a dictionary describing the dataset features.

  Args:
    max_num_entities: int. The maximum number of foreground and background
      entities in each image. This corresponds to the number of segmentation
      masks returned per scene.

  Returns:
    A dictionary which maps feature names to `tf.Example`-compatible shape and
    data type descriptors.
  """
    return {
        'image':
        tf.FixedLenFeature(IMAGE_SIZE + [3], tf.string),
        'mask':
        tf.FixedLenFeature([max_num_entities] + IMAGE_SIZE + [1], tf.string),
    }
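
A hedged decoding sketch for this spec, assuming each tf.string scalar stores a single raw byte (the convention this reader style implies); _decode is a hypothetical helper name:

def _decode(example_proto, max_num_entities):
    parsed = tf.parse_single_example(example_proto,
                                     feature_descriptions(max_num_entities))
    for key in ('image', 'mask'):
        # decode_raw turns each one-byte string into a length-1 uint8 vector,
        # adding a trailing axis that is squeezed away again.
        parsed[key] = tf.squeeze(tf.decode_raw(parsed[key], tf.uint8), axis=-1)
    return parsed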