def minibatch(self, dataset, subset, use_datasets, cache_data,
                  shift_ratio=-1):
        del dataset, use_datasets, cache_data, shift_ratio
        if (not hasattr(self, 'fake_images') or not hasattr(self, 'fake_labels')):
            raise ValueError('Must call set_fake_data() before calling minibatch '
                             'on TestImagePreprocessor')
        if self.expected_subset is not None:
            assert subset == self.expected_subset

        with tf.name_scope('batch_processing'):
            image_slice, label_slice = tf.train.slice_input_producer(
                [self.fake_images, self.fake_labels],
                shuffle=False,
                name='image_slice')
            raw_images, raw_labels = tf.train.batch(
                [image_slice, label_slice], batch_size=self.batch_size,
                name='image_batch')
            images = [[] for _ in range(self.num_splits)]
            labels = [[] for _ in range(self.num_splits)]
            for i in xrange(self.batch_size):
                split_index = i % self.num_splits
                raw_image = tf.cast(raw_images[i], self.dtype)
                images[split_index].append(raw_image)
                labels[split_index].append(raw_labels[i])
            for split_index in xrange(self.num_splits):
                images[split_index] = tf.parallel_stack(images[split_index])
                labels[split_index] = tf.parallel_stack(labels[split_index])

            return images, labels
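
A quick aside on the op this split loop relies on: tf.parallel_stack joins a list of same-shape tensors along a new axis 0, like tf.stack, but copies its inputs in parallel and requires statically known shapes. A minimal, self-contained sketch (TF 1.x, as in the snippet above):

import tensorflow as tf

a = tf.constant([1.0, 2.0])
b = tf.constant([3.0, 4.0])
stacked = tf.parallel_stack([a, b])  # shape [2, 2], same values as tf.stack

with tf.Session() as sess:
    print(sess.run(stacked))  # [[1. 2.] [3. 4.]]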
Example #2
def input_fn(data_dir, subset, num_shards, batch_size):
    """Create input graph for model.

    Args:
        data_dir: Directory where TFRecords representing the dataset are located.
        subset: one of 'train', 'validate' and 'eval'.
        num_shards: num of towers participating in data-parallel training.
        batch_size: total batch size for training, to be divided by the number
            of shards.
    Returns:
        two lists of tensors for features and labels, each of num_shards length.
    """
    with tf.device('/cpu:0'):
        dataset = mlp_data.MlpDataSet(data_dir, subset)
        image_batch, label_batch = dataset.make_batch(batch_size)
        if num_shards <= 1:
            # No GPU available or only 1 GPU.
            return [image_batch], [label_batch]

        # Note that passing num=batch_size is safe here, even though
        # dataset.batch(batch_size) can, in some cases, return fewer than batch_size
        # examples. This is because it does so only when repeating for a limited
        # number of epochs, but our dataset repeats forever.
        image_batch = tf.unstack(image_batch, num=batch_size, axis=0)
        label_batch = tf.unstack(label_batch, num=batch_size, axis=0)
        feature_shards = [[] for i in range(num_shards)]
        label_shards = [[] for i in range(num_shards)]
        for i in xrange(batch_size):
            idx = i % num_shards
            feature_shards[idx].append(image_batch[i])
            label_shards[idx].append(label_batch[i])
        feature_shards = [tf.parallel_stack(x) for x in feature_shards]
        label_shards = [tf.parallel_stack(x) for x in label_shards]
        return feature_shards, label_shards
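
The round-robin sharding in input_fn can be verified in isolation. A self-contained sketch with toy tensors standing in for the mlp_data pipeline:

import tensorflow as tf

batch = tf.reshape(tf.range(6, dtype=tf.float32), [6, 1])
examples = tf.unstack(batch, num=6, axis=0)
num_shards = 3
shards = [[] for _ in range(num_shards)]
for i in range(6):
    shards[i % num_shards].append(examples[i])
shards = [tf.parallel_stack(s) for s in shards]  # each shard has shape [2, 1]

with tf.Session() as sess:
    # shard 0 holds examples 0 and 3, shard 1 holds 1 and 4, shard 2 holds 2 and 5
    print(sess.run(shards))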
  def minibatch(self, dataset, subset, use_datasets, cache_data,
                shift_ratio=-1):
    # TODO(jsimsa): Implement datasets code path
    del use_datasets, cache_data, shift_ratio
    with tf.name_scope('batch_processing'):
      all_images, all_labels = dataset.read_data_files(subset)
      all_images = tf.constant(all_images)
      all_labels = tf.constant(all_labels)
      input_image, input_label = tf.train.slice_input_producer(
          [all_images, all_labels])
      input_image = tf.cast(input_image, self.dtype)
      input_label = tf.cast(input_label, tf.int32)
      # Ensure that the random shuffling has good mixing properties.
      min_fraction_of_examples_in_queue = 0.4
      min_queue_examples = int(dataset.num_examples_per_epoch(subset) *
                               min_fraction_of_examples_in_queue)
      raw_images, raw_labels = tf.train.shuffle_batch(
          [input_image, input_label], batch_size=self.batch_size,
          capacity=min_queue_examples + 3 * self.batch_size,
          min_after_dequeue=min_queue_examples)

      images = [[] for i in range(self.num_splits)]
      labels = [[] for i in range(self.num_splits)]

      # Create a list of size batch_size, each containing one image of the
      # batch. Without the unstack call, raw_images[i] would still access the
      # same image via a strided_slice op, but would be slower.
      raw_images = tf.unstack(raw_images, axis=0)
      raw_labels = tf.unstack(raw_labels, axis=0)
      for i in xrange(self.batch_size):
        split_index = i % self.num_splits
        # The raw image read from data has the format [depth, height, width];
        # reshape it to the format returned by minibatch.
        raw_image = tf.reshape(raw_images[i],
                               [dataset.depth, dataset.height, dataset.width])
        raw_image = tf.transpose(raw_image, [1, 2, 0])
        image = self.preprocess(raw_image)
        images[split_index].append(image)

        labels[split_index].append(raw_labels[i])

      for split_index in xrange(self.num_splits):
        images[split_index] = tf.parallel_stack(images[split_index])
        labels[split_index] = tf.parallel_stack(labels[split_index])
      return images, labels
Example #4
 def feature_shard(self, feature, num_shards):
     if num_shards > 1:
         feature_batch = tf.unstack(
             feature, num=self.config.batch_size, axis=0)
         feature_shards = [[] for i in range(num_shards)]
         for i in range(self.config.batch_size):
             idx = i % num_shards
             feature_shards[idx].append(feature_batch[i])
         feature_shards = [tf.parallel_stack(x) for x in feature_shards]
     else:
         feature_shards = [feature]
     return feature_shards
    def device_minibatches(cls, num_devices, data_dir, total_batch_size,
                           height, width, distort_color,
                           val=False):
        dtype = tf.float32
        subset = 'validation' if val else 'train'

        nrecord = get_num_records(os.path.join(
            data_dir, '{}-*'.format(subset)))
        input_buffer_size = min(10000, nrecord)

        record_input = data_flow_ops.RecordInput(
            file_pattern=os.path.join(data_dir, '{}-*'.format(subset)),
            parallelism=64,
            # Note: This causes deadlock during init if
            # larger than dataset
            buffer_size=input_buffer_size,
            batch_size=total_batch_size,
            seed=0)

        records = record_input.get_yield_op()

        # Split batch into individual images
        records = tf.split(records, total_batch_size, 0)
        records = [tf.reshape(record, []) for record in records]
        # Deserialize and preprocess images into batches for each device
        images = defaultdict(list)
        labels = defaultdict(list)
        with tf.name_scope('input_pipeline'):
            for thread_id, record in enumerate(records):
                imgdata, label, bbox, _ = cls._deserialize_image_record(record)
                image = cls._preprocess(
                    imgdata, bbox, thread_id, height, width, distort_color,
                    val=val)
                label -= 1  # Change to 0-based (don't use background class)
                device_num = thread_id % num_devices
                images[device_num].append(image)
                labels[device_num].append(label)

            # Stack images back into a sub-batch for each device
            for device_num in xrange(num_devices):
                images[device_num] = tf.parallel_stack(images[device_num])
                labels[device_num] = tf.concat(labels[device_num], 0)
                images[device_num] = tf.reshape(
                    images[device_num], [-1, height, width, 3])
                images[device_num] = tf.clip_by_value(
                    images[device_num], 0., 255.)
                images[device_num] = tf.cast(images[device_num], dtype)

        return images, labels, nrecord
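
The RecordInput -> tf.split -> scalar-records pattern above recurs in several of the following examples. A sketch of just the splitting step, with a constant string tensor standing in for record_input.get_yield_op():

import tensorflow as tf

records = tf.constant(['rec0', 'rec1', 'rec2', 'rec3'])  # stand-in for the yield op
records = tf.split(records, 4, 0)                # four tensors of shape [1]
records = [tf.reshape(r, []) for r in records]   # four scalar strings

with tf.Session() as sess:
    print(sess.run(records))  # [b'rec0', b'rec1', b'rec2', b'rec3']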
Example #6
  def minibatch(self, dataset, subset):
    with tf.name_scope('batch_processing'):
      images = [[] for i in range(self.device_count)]
      labels = [[] for i in range(self.device_count)]
      record_input = data_flow_ops.RecordInput(
          file_pattern=dataset.tf_record_pattern(subset),
          seed=301,
          parallelism=64,
          buffer_size=10000,
          batch_size=self.batch_size,
          name='record_input')
      records = record_input.get_yield_op()
      records = tf.split(records, self.batch_size, 0)
      records = [tf.reshape(record, []) for record in records]
      for i in xrange(self.batch_size):
        value = records[i]
        image_buffer, label_index, bbox, _ = parse_example_proto(value)
        image = self.preprocess(image_buffer, bbox, i % 4)
        device_index = i % self.device_count
        images[device_index].append(image)
        labels[device_index].append(label_index)
      label_index_batch = [None] * self.device_count
      for device_index in xrange(self.device_count):
        images[device_index] = tf.parallel_stack(images[device_index])
        label_index_batch[device_index] = tf.concat(labels[device_index], 0)

        # dynamic_pad=True) # HACK TESTING dynamic_pad=True
        images[device_index] = tf.cast(images[device_index], self.dtype)
        depth = 3
        images[device_index] = tf.reshape(
            images[device_index],
            shape=[self.batch_size_per_device, self.height, self.width, depth])
        label_index_batch[device_index] = tf.reshape(
            label_index_batch[device_index], [self.batch_size_per_device])
        # Display the training images in the visualizer.
        # tf.summary.image('images', images)

      return images, label_index_batch
Example #7
    def get_loader_w3d(self):
        """
        Similar to get_loader, but outputs are:
          image_batch: batched images as per data_format
          label_batch: batched keypoint labels N x K x 3
          label3d_batch: batched keypoint labels N x (216 + 10 + 42)
                         216=24*3*3 pose, 10 shape, 42=14*3 3D joints
                         (3D datasets only have 14 joints annotated)
          has_gt3d_batch: batched indicator for
                          existence of [3D joints, 3D SMPL] labels N x 2 - bool
                          Note 3D SMPL is only available for H3.6M.


        The problem is that datasets without pose/shape do not have them
        in the tfrecords. There's no way to check for this in TF, so
        instead we make 2 string_input_producers, one for data without 3d
        and the other for data with 3d, and send [2 x *] to train.*batch.
        """
        datasets_nomv = [d for d in self.datasets if d not in _MV_DATASETS]
        datasets_yesmv = [d for d in self.datasets if d in _MV_DATASETS]

        # TODO: synthetic data has smpl but no 3d joint!
        files_nomv = data_utils.get_all_files(self.dataset_dir, datasets_nomv)
        files_yesmv = data_utils.get_all_files(self.dataset_dir,
                                               datasets_yesmv)

        # Make sure we have dataset with 3D.

        do_shuffle = True

        if len(files_yesmv) != 0:
            fqueue_yesmv = tf.train.string_input_producer(files_yesmv,
                                                          shuffle=do_shuffle,
                                                          name="input_wmv")
            image, label, label3d, has_smpl3d, pose, _ = self.read_data(
                fqueue_yesmv, has_multiview=True)
            if len(files_nomv) != 0:
                fqueue_nomv = tf.train.string_input_producer(
                    files_nomv, shuffle=do_shuffle, name="input_woutmv")
                image_nomv, label_nomv, label3d_nomv, has_smpl3d_nomv, pose_nomv, has_3djoint = self.read_data(
                    fqueue_nomv, has_multiview=False)
                image = tf.parallel_stack([image, image_nomv])
                label = tf.parallel_stack([label, label_nomv])
                label3d = tf.parallel_stack([label3d, label3d_nomv])
                has_smpl3d_nomv = tf.concat([has_3djoint, has_smpl3d_nomv],
                                            axis=0)
                has_3dgt = tf.parallel_stack([has_smpl3d, has_smpl3d_nomv])
                pose = tf.parallel_stack([pose, pose_nomv])
            else:
                assert False
                # If no "no3d" images, need to make them 1 x *
                image = tf.expand_dims(image, 0)
                label = tf.expand_dims(label, 0)
                label3d = tf.expand_dims(label3d, 0)
                has_3dgt = tf.expand_dims(has_smpl3d, 0)
                pose = tf.expand_dims(pose, 0)
        else:
            fqueue_nomv = tf.train.string_input_producer(files_nomv,
                                                         shuffle=do_shuffle,
                                                         name="input_woutmv")
            image, label, label3d, has_smpl3d_nomv, pose, has_3djoint = self.read_data(
                fqueue_nomv, has_multiview=False)
            has_3dgt = tf.concat([has_3djoint, has_smpl3d_nomv], axis=0)
            image = tf.expand_dims(image, 0)
            label = tf.expand_dims(label, 0)
            label3d = tf.expand_dims(label3d, 0)
            has_3dgt = tf.expand_dims(has_3dgt, 0)
            pose = tf.expand_dims(pose, 0)
        # Combine 3D bools.
        # each is 2 x 1, column is [3d_joints, 3d_smpl]

        min_after_dequeue = 2000
        capacity = min_after_dequeue + 3 * self.batch_size

        print('image.shape=', image.shape)
        print('label.shape=', label.shape)
        print('label3d.shape=', label3d.shape)
        print('has_3dgt.shape=', has_3dgt.shape)
        print('pose.shape=', pose.shape)
        image_batch, label_batch, label3d_batch, bool_batch, pose_batch = tf.train.shuffle_batch(
            [image, label, label3d, has_3dgt, pose],
            batch_size=self.batch_size,
            num_threads=8,
            capacity=capacity,
            min_after_dequeue=min_after_dequeue,
            enqueue_many=True,
            name='input_batch_train_3d')

        if self.data_format == 'NCHW':
            image_batch = tf.transpose(image_batch, [0, 3, 1, 2])
        elif self.data_format == 'NHWC':
            pass
        else:
            raise Exception("[!] Unkown data_format: {}".format(
                self.data_format))

        batch_dict = {
            'image': image_batch,
            'label': label_batch,
            'label3d': label3d_batch,
            'has3d': bool_batch,
            'oripose': pose_batch,
        }

        return batch_dict
Example #8
 def aggregate(self, gradients):
   # Assertion
   assert len(gradients) > 0, "Empty list of gradients to aggregate"
   # Computation
   gradients = tf.parallel_stack(gradients)
   return tf.py_func(self._aggregate, [gradients], gradients.dtype, stateful=False, name="GAR_averaged-median")
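
tf.py_func here hands the stacked gradients (shape [num_workers, ...]) to a numpy-side callable. The real _aggregate is not shown in this snippet; a hedged sketch of the interface it must satisfy, with a simple coordinate-wise mean standing in for the actual averaged-median rule:

import numpy as np

def _aggregate(stacked_gradients):
    # stacked_gradients: ndarray with one worker's gradient per row along axis 0.
    # Placeholder reduction; the output dtype must match the input's.
    return np.mean(stacked_gradients, axis=0)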
Example #9
    def build_model(self, metadata_path=None, embedding_weights=None):
        # Transforms the `embedding_weights` data (that are just numpy arrays)
        # into a tf.Variable() object (or, if `embedding_weights` is None,
        # just creates a new randomly initialized tf.Variable()).
        self.embedding_weights, self.config = ops.embedding_layer(
            metadata_path, embedding_weights)

        # Transforms `self.input` from a list of numbers into a list of word
        # vectors. Output is Batch x Time x Word_Vector.
        self.embedded_input = tf.nn.embedding_lookup(self.embedding_weights,
                                                     self.input)

        # Generate a random fixed vector
        self.fixed_vec = tf.get_variable("fixed_vec", [128], trainable=False)
        self.fixed_vec = tf.parallel_stack([self.fixed_vec] *
                                           self.args.get("sequence_length"))
        new_fixed_vec = self.fixed_vec

        for i in range(1):
            # Concatenate the fixed vector with each word vector
            input_and_fixed = concatenate_matrices(new_fixed_vec,
                                                   self.embedded_input, 64)

            # Apply a softmax in each sequence (i.e., in each element of the batch)
            self.softmaxed_sequences = []
            self.rescaled_sequences = []
            for j, item in enumerate(input_and_fixed):
                sequence = tf.stack(input_and_fixed[j])

                # The Dense layer expects Batch x Input. I am fooling it into believing that
                # it got a batch, and it will process each word separately, which is what I
                # want.
                fc_out = tf.layers.dense(sequence, 1)
                softmaxed_seq = tf.nn.softmax(fc_out)
                self.softmaxed_sequences.append(softmaxed_seq)

                rescaled_seq = tf.multiply(sequence, softmaxed_seq)
                self.rescaled_sequences.append(rescaled_seq)

            self.rescaled_sequences = tf.stack(self.rescaled_sequences)

            # For now, just hardcoding values here
            self.lstm_out = ops.lstm_block(self.rescaled_sequences,
                                           hidden_units=128,
                                           dropout=0.5,
                                           layers=1,
                                           dynamic=False,
                                           bidirectional=True)

            self.loop_dense = tf.layers.dense(self.lstm_out,
                                              128,
                                              activation=tf.nn.sigmoid)
            new_fixed_vec = self.loop_dense

        self.final_dense = tf.layers.dense(self.loop_dense,
                                           1,
                                           activation=tf.nn.sigmoid)
        self.out = tf.squeeze(self.final_dense, 1)

        with tf.name_scope("loss"):
            self.loss = losses.mean_squared_error(self.expected_output,
                                                  self.out)

            if self.args["l2_reg_beta"] > 0.0:
                self.regularizer = ops.get_regularizer(
                    self.args["l2_reg_beta"])
                self.loss = tf.reduce_mean(self.loss + self.regularizer)

        #### Evaluation Measures.
        with tf.name_scope("Pearson_correlation"):
            self.pco, self.pco_update = tf.contrib.metrics.streaming_pearson_correlation(
                self.out, self.expected_output, name="pearson")
        with tf.name_scope("MSE"):
            self.mse, self.mse_update = tf.metrics.mean_squared_error(
                self.expected_output, self.out, name="mse")
Example #10
    def get_loader(self):
        """
        Returns:
            batch_dict (dict):
                image (BxTxHxWx3).
                label (BxTx19x3).
                pose (BxTx24x3x3).
                shape (Bx10).
                fnames (BxT).
                joint (BxTx14x3).
                has3d (Bx2).
        """
        if self.split_balanced:
            datasets_2d = [d for d in self.datasets if d not in _3D_DATASETS]
            datasets_3d = [d for d in self.datasets if d in _3D_DATASETS]
        else:
            datasets_2d = [d for d in self.datasets]
            datasets_3d = datasets_2d[::-1]

        files_2d = data_utils.get_all_files(self.dataset_dir, datasets_2d)
        files_3d = data_utils.get_all_files(self.dataset_dir, datasets_3d)

        def split_list(list_in):
            mid_way = int(len(list_in) / 2)
            return list_in[:mid_way], list_in[mid_way:]

        # Split files_3d in two if one is empty.
        if len(files_2d) == 0:
            files_2d, files_3d = split_list(files_3d)
        elif len(files_3d) == 0:
            files_2d, files_3d = split_list(files_2d)

        do_shuffle = True
        fqueue_2d = tf.train.string_input_producer(files_2d,
                                                   shuffle=do_shuffle,
                                                   name='input_2d',
                                                   capacity=128)
        fqueue_3d = tf.train.string_input_producer(files_3d,
                                                   shuffle=do_shuffle,
                                                   name='input_3d',
                                                   capacity=128)

        ret_dict_2d = self.read_data(fqueue_2d)
        ret_dict_3d = self.read_data(fqueue_3d)

        if self.precomputed_phi:
            pack_name = ['phis', 'images']
        else:
            if self.data_format == 'NCHW':
                # TxHxWx3 --> Tx3xHxW
                ret_dict_2d['images'] = tf.transpose(ret_dict_2d['images'],
                                                     (0, 3, 1, 2))
                ret_dict_3d['images'] = tf.transpose(ret_dict_3d['images'],
                                                     (0, 3, 1, 2))
            elif self.data_format == 'NHWC':
                pass
            else:
                raise ValueError('Data format {} not recognized!'.format(
                    self.data_format))
            pack_name = ['images']

        min_after_dequeue = 32
        num_threads = 4
        capacity = min_after_dequeue + (num_threads + 10) * self.batch_size

        pack_name.extend(
            ['labels', 'fnames', 'poses', 'shape', 'gt3ds', 'has_3d'])

        # parallel_stack can't handle bool..
        ret_dict_2d['has_3d'] = tf.cast(ret_dict_2d['has_3d'],
                                        dtype=tf.float32)
        ret_dict_3d['has_3d'] = tf.cast(ret_dict_3d['has_3d'],
                                        dtype=tf.float32)
        # Stack 2d and 3d data.
        pack_these = [
            tf.parallel_stack([ret_dict_2d[key], ret_dict_3d[key]])
            for key in pack_name
        ]
        pack_these[pack_name.index('has_3d')] = tf.cast(
            pack_these[pack_name.index('has_3d')], dtype=tf.bool)

        all_batched = tf.train.shuffle_batch(
            pack_these,
            batch_size=self.batch_size,
            num_threads=num_threads,
            capacity=capacity,
            min_after_dequeue=min_after_dequeue,
            enqueue_many=True,
            name='input_batch_train')
        batch_dict = {}
        for name, batch in zip(pack_name, all_batched):
            batch_dict[name] = batch

        return batch_dict
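
The cast-stack-cast workaround above reflects the snippet's own comment that tf.parallel_stack can't handle bool; a minimal illustration of the same trick:

import tensorflow as tf

flags_a = tf.constant([True, False])
flags_b = tf.constant([True, True])
stacked = tf.cast(
    tf.parallel_stack([tf.cast(flags_a, tf.float32),
                       tf.cast(flags_b, tf.float32)]),
    tf.bool)  # shape [2, 2], dtype bool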
Example #11
    def k_full(self, input1, input2=None):
        """Iteratively building the full NNGP kernel.
    """
        input1 = self._input_layer_normalization(input1)
        if input2 is None:
            input2 = input1
        else:
            input2 = self._input_layer_normalization(input2)

        with tf.name_scope("k_full"):
            cov_init = tf.matmul(input1, input2,
                                 transpose_b=True) / input1.shape[1].value

            self.k_diag(input1)
            q_aa_init = self.layer_qaa_dict[0]

            q_ab = cov_init
            corr = q_ab / q_aa_init[0]
            corr_init = corr
            self.layer_corr_dict = {0: corr}

            if FLAGS.fraction_of_int32 > 1:
                batch_size, batch_count = self._get_batch_size_and_count(
                    input1, input2)
                with tf.name_scope("q_ab"):
                    q_ab_all = []
                    for b_x in range(batch_count):
                        with tf.name_scope("batch_%d" % b_x):
                            corr_flat_batch = corr[batch_size *
                                                   b_x:batch_size *
                                                   (b_x + 1), :]
                            corr_flat_batch = tf.reshape(corr_flat_batch, [-1])

                            for l in xrange(self.depth):
                                with tf.name_scope("layer_%d" % l):
                                    q_aa = self.layer_qaa_dict[l]
                                    multiplier = tf.constant(10**8,
                                                             dtype=tf.float64)
                                    corr = tf.round(
                                        corr * multiplier) / multiplier
                                    q_ab = (corr * tf.math.asin(corr) +
                                            tf.math.sqrt(1 - tf.math.pow(
                                                corr, 2))) / np.pi + corr / 2
                                    q_ab = 0.5 * self.weight_var * q_ab + self.bias_var
                                    corr_flat_batch = q_ab / self.layer_qaa_dict[
                                        l + 1][0]
                                    corr = corr_flat_batch
                                    self.layer_corr_dict[l + 1] = corr

                            q_ab_all.append(q_ab)

                    q_ab_all = tf.parallel_stack(q_ab_all)
            else:
                with tf.name_scope("q_ab"):
                    corr_flat = tf.reshape(corr, [-1])
                    for l in xrange(self.depth):
                        with tf.name_scope("layer_%d" % l):
                            q_aa = self.layer_qaa_dict[l]
                            multiplier = tf.constant(10**8, dtype=tf.float64)
                            corr = tf.round(corr * multiplier) / multiplier
                            q_ab = (corr * tf.math.asin(corr) + tf.math.sqrt(
                                1 - tf.math.pow(corr, 2))) / np.pi + corr / 2
                            q_ab = 0.5 * self.weight_var * q_ab + self.bias_var
                            corr_flat = q_ab / self.layer_qaa_dict[l + 1][0]
                            corr = corr_flat
                        q_ab_all = q_ab

        return tf.reshape(q_ab_all, cov_init.shape, "qab")
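
For reference, the per-layer update in both branches matches the ReLU arc-cosine NNGP recursion. Writing c for the previous layer's correlation, and assuming weight_var = \sigma_w^2 and bias_var = \sigma_b^2, the code computes

    q_{ab}^{(l+1)} = \sigma_b^2 + \frac{\sigma_w^2}{2}\left[\frac{c\,\arcsin c + \sqrt{1-c^2}}{\pi} + \frac{c}{2}\right]

after which the correlation for the next layer is q_ab^{(l+1)} normalized by the diagonal term from layer_qaa_dict.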
Example #12
def build_reduce_sum(scaled_grads):
    stacked = tf.parallel_stack(values=scaled_grads)
    reduced = tf.reduce_sum(stacked, 0)
    return [reduced] * len(scaled_grads)
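
A self-contained usage sketch of build_reduce_sum: every device contributes one gradient tensor and each entry of the result is the identical sum (toy constants assumed):

import tensorflow as tf

g0 = tf.constant([1.0, 2.0])
g1 = tf.constant([3.0, 4.0])
reduced = build_reduce_sum([g0, g1])

with tf.Session() as sess:
    print(sess.run(reduced))  # [array([4., 6.], ...), array([4., 6.], ...)]
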
  def minibatch(self, dataset, subset, use_datasets, cache_data,
                shift_ratio=-1):
    if shift_ratio < 0:
      shift_ratio = self.shift_ratio
    with tf.name_scope('batch_processing'):
      # Build final results per split.
      images = [[] for _ in range(self.num_splits)]
      labels = [[] for _ in range(self.num_splits)]
      if use_datasets:
        glob_pattern = dataset.tf_record_pattern(subset)
        file_names = gfile.Glob(glob_pattern)
        if not file_names:
          raise ValueError('Found no files in --data_dir matching: {}'
                           .format(glob_pattern))
        ds = tf.data.TFRecordDataset.list_files(file_names)
        ds = ds.apply(
            interleave_ops.parallel_interleave(
                tf.data.TFRecordDataset, cycle_length=10))
        if cache_data:
          ds = ds.take(1).cache().repeat()
        counter = tf.data.Dataset.range(self.batch_size)
        counter = counter.repeat()
        ds = tf.data.Dataset.zip((ds, counter))
        ds = ds.prefetch(buffer_size=self.batch_size)
        ds = ds.shuffle(buffer_size=10000)
        ds = ds.repeat()
        ds = ds.apply(
            batching.map_and_batch(
                map_func=self.parse_and_preprocess,
                batch_size=self.batch_size_per_split,
                num_parallel_batches=self.num_splits))
        ds = ds.prefetch(buffer_size=self.num_splits)
        ds_iterator = ds.make_one_shot_iterator()
        for d in xrange(self.num_splits):
          labels[d], images[d] = ds_iterator.get_next()

      else:
        record_input = data_flow_ops.RecordInput(
            file_pattern=dataset.tf_record_pattern(subset),
            seed=301,
            parallelism=64,
            buffer_size=10000,
            batch_size=self.batch_size,
            shift_ratio=shift_ratio,
            name='record_input')
        records = record_input.get_yield_op()
        records = tf.split(records, self.batch_size, 0)
        records = [tf.reshape(record, []) for record in records]
        for idx in xrange(self.batch_size):
          value = records[idx]
          (label, image) = self.parse_and_preprocess(value, idx)
          split_index = idx % self.num_splits
          labels[split_index].append(label)
          images[split_index].append(image)

      for split_index in xrange(self.num_splits):
        if not use_datasets:
          images[split_index] = tf.parallel_stack(images[split_index])
          labels[split_index] = tf.concat(labels[split_index], 0)
        images[split_index] = tf.cast(images[split_index], self.dtype)
        depth = 3
        images[split_index] = tf.reshape(
            images[split_index],
            shape=[self.batch_size_per_split, self.height, self.width, depth])
        labels[split_index] = tf.reshape(labels[split_index],
                                         [self.batch_size_per_split])
      return images, labels
def read_tfrecord_and_decode_into_image_annotation_pair_tensors(
        tfrecord_filenames_queue):

    reader = tf.TFRecordReader()

    _, serialized_example = reader.read(tfrecord_filenames_queue)

    features, sequence_features = tf.parse_single_sequence_example(
        serialized_example,
        context_features={
            'height': tf.FixedLenFeature([], tf.int64),
            'width': tf.FixedLenFeature([], tf.int64),
            #'time_step': tf.FixedLenFeature([], tf.int64),
            'label_len': tf.FixedLenFeature([], tf.int64),
            'data_raw': tf.FixedLenFeature([], tf.string),
        },
        sequence_features={
            'aligned_label': tf.FixedLenSequenceFeature([], tf.int64),
        })

    image = tf.decode_raw(features['data_raw'], tf.uint8)
    height = tf.cast(features['height'], tf.int32)
    width = tf.cast(features['width'], tf.int32)
    label_len = tf.cast(features['label_len'], tf.int32)

    image_shape = tf.parallel_stack([height, width, 1])
    image = tf.reshape(image, image_shape)

    img_size = cfg.IMG_SHAPE  #960,48
    time_step = tf.constant(cfg.TIME_STEP, tf.int32)

    #if cfg.NCHANNELS==1: image = tf.image.rgb_to_grayscale(image)
    image = tf.image.resize_images(image,
                                   size=(img_size[0], img_size[1]),
                                   method=tf.image.ResizeMethod.BILINEAR)
    #image = tf.transpose(image,perm=[1,0,2])
    image = tf.cast(tf.reshape(image, [img_size[0], cfg.NUM_FEATURES, 1]),
                    dtype=tf.float32) / 255.
    #label = tf.serialize_sparse(sequence_features['aligned_label'])
    label = tf.cast(sequence_features['aligned_label'], tf.int32)  ###
    label = tf.reshape(label, [cfg.MAX_CHAR_LEN])  ###
    #label = tf.serialize_sparse(sequence_features['aligned_label'])

    #indices = tf.decode_raw(sequence_features['aligned_label'],string)
    """
    batch_labels = sparse_tuple_from_label(aligned_label.eval())

    label = tf.SparseTensorValue(indices,values,shape)
    label = tf.convert_to_tensor_or_sparse_tensor(label)
    label = tf.serialize_sparse(sequence_features['aligned_label'] ) # for batching
    label = tf.deserialize_many_sparse(label, tf.int64) # post-batching...
    label = tf.cast(label, tf.int32) # for ctc_loss
    """
    # Can handle data of varying lengths. Notice:
    #image_shape = tf.parallel_stack([height, width])
    #image = tf.reshape(image,image_shape)

    #img_size = cfg.IMG_SHAPE
    #time_step = tf.constant(cfg.TIME_STEP,tf.int32)

    #if cfg.NCHANNELS==1: image = tf.image.rgb_to_grayscale(image)
    ##image = tf.image.resize_images(image,size=(img_size[1],img_size[0]),method=tf.image.ResizeMethod.BILINEAR)
    ##image = tf.transpose(image,perm=[1,0,2])
    ##image = tf.cast(tf.reshape(image,[img_size[0],cfg.NUM_FEATURES]),dtype=tf.float32)/255.
    # image = tf.cast(tf.reshape(image,[img_size[0],cfg.NUM_FEATURES]),dtype=tf.float32)

    #print("cast-reshape images:",image)
    # The last dimension was added because
    # the tf.resize_image_with_crop_or_pad() accepts tensors
    # that have depth. We need resize and crop later.
    # TODO: See if it is necessary and probably remove third
    # dimension
    #annotation_shape = tf.pack([height, width, 1])
    # image = tf.reshape(image, image_shape)

    return image, label, label_len, time_step
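
A hedged usage sketch for the reader above, wired into a TF 1.x queue-runner pipeline. The filename and batch size are placeholders, and cfg must be importable exactly as in the snippet:

filename_queue = tf.train.string_input_producer(['train.tfrecords'])
image, label, label_len, time_step = \
    read_tfrecord_and_decode_into_image_annotation_pair_tensors(filename_queue)
images, labels, label_lens, time_steps = tf.train.batch(
    [image, label, label_len, time_step], batch_size=32, num_threads=4)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    batch = sess.run([images, labels])
    coord.request_stop()
    coord.join(threads)
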
    def minibatch(self, file_pattern):
        with tf.name_scope('batch_processing'):
            output_data = [[] for i in range(self.device_count)]
            labels = [[] for i in range(self.device_count)]
            record_input = data_flow_ops.RecordInput(
                file_pattern=file_pattern,
                seed=301,
                parallelism=64,
                buffer_size=10000,
                batch_size=self.batch_size,
                name='record_input')
            records = record_input.get_yield_op()
            records = tf.split(records, self.batch_size, 0)
            records = [tf.reshape(record, []) for record in records]
            for i in xrange(self.batch_size):
                value = records[i]
                data_buffer, label_index, _, frames = self.parse_example_proto(
                    value)

                processed_data = self.preprocess(data_buffer, frames)

                device_index = i % self.device_count
                output_data[device_index].append(processed_data)
                labels[device_index].append(label_index)
            label_index_batch = [None] * self.device_count
            for device_index in xrange(self.device_count):
                output_data[device_index] = tf.parallel_stack(
                    output_data[device_index])
                label_index_batch[device_index] = tf.concat(
                    labels[device_index], 0)

                # dynamic_pad=True) # HACK TESTING dynamic_pad=True
                output_data[device_index] = tf.cast(output_data[device_index],
                                                    self.dtype)
                if self.data_type == 'rgb':
                    depth = 3
                    output_data[device_index] = tf.reshape(
                        output_data[device_index],
                        shape=[
                            self.batch_size_per_device, self.time_window,
                            self.cropped_size[0], self.cropped_size[1], depth
                        ])
                    # shape=[self.batch_size_per_device, -1, self.cropped_size[0], self.cropped_size[1], depth])
                elif self.data_type == 'flow':
                    depth = 2
                    output_data[device_index] = tf.reshape(
                        output_data[device_index],
                        shape=[
                            self.batch_size_per_device, self.time_window,
                            self.cropped_size[0], self.cropped_size[1], depth
                        ])
                    # shape=[self.batch_size_per_device, -1, self.cropped_size[0], self.cropped_size[1], depth])
                # elif self.data_type == 'audio':
                # TBD
                else:
                    raise ValueError('data_type error, got: {}'.format(
                        self.data_type))
                label_index_batch[device_index] = tf.reshape(
                    label_index_batch[device_index],
                    [self.batch_size_per_device])
                # Display the training images in the visualizer.
                # tf.summary.image('images', images)

            return output_data, label_index_batch
    OUT_DIR = "out/000000410912/imwrite_vs_scipy"

    # file = tf.read_file("../tools/orig_full_000000177006_2.jpg")
    file = tf.read_file("../tools/orig_full_000000410912_2.jpg")
    file = tf.image.decode_jpeg(file)
    file = tf.expand_dims(file, 0)
    file = resize_img(file, 64, 1)
    print("file:", file)

    # orig = tf.read_file("../tools/orig_000000177006.jpg")
    orig = tf.read_file("../tools/orig_000000410912.jpg")
    orig = tf.image.decode_jpeg(orig)
    orig = tf.image.flip_left_right(orig)
    size = tf.minimum(427, 640)
    crop_shape = tf.parallel_stack([size, size, 3])
    orig = tf.random_crop(orig, crop_shape, seed=4285)
    origimg = orig
    orig = None

    orig = tf.image.resize_images(origimg, [64, 64], method=tf.image.ResizeMethod.BILINEAR)
    orig = prep(orig)
    print("orig:", orig)
    orignn = tf.image.resize_images(origimg, [64, 64], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    orignn = prep(orignn)
    origbi = tf.image.resize_images(origimg, [64, 64], method=tf.image.ResizeMethod.BICUBIC)
    origbi = prep(origbi)
    origar = tf.image.resize_images(origimg, [64, 64], method=tf.image.ResizeMethod.AREA)
    origar = prep(origar)

    sess.run(tf.global_variables_initializer())
Example #17
    def minibatch(self, dataset, subset, params, shift_ratio=-1):
        del shift_ratio

        options = dataset.options

        if dataset.loading_thread is None:
            dataset.start_prefetch_threads = True
            with tf.name_scope('enqueue_data'):
                dataset.loading_buffer = Queue(3 * dataset.num_loading_threads)
            dataset.loading_thread = Thread(target=self._pre_fectch_thread,
                                            args=(dataset,
                                                  dataset.loading_buffer))
            dataset.loading_thread.start()

        def __gen(c_id):
            return dataset.loading_buffer.get()
            # img, lb = dataset.loading_buffer.get()
            # img.set_shape([_BATCH_SIZE, _CROP_SIZE, _CROP_SIZE, 3])
            # lb.set_shape([_BATCH_SIZE, 3])
            # return img, lb

        def __set_shape(img, label):
            img.set_shape(
                [self.batch_size, options.crop_size, options.crop_size, 3])
            if options.use_triplet_loss:
                label.set_shape([self.batch_size, 3])
            else:
                label.set_shape([self.batch_size])
            return img, label

        n = dataset.num_examples_per_epoch(subset)
        index_list = [i for i in range(n)]
        with tf.name_scope('batch_processing'):
            dd = tf.data.Dataset.from_tensors(index_list)
            if options.net_mode == 'triple_loss':
                dd = dd.map(lambda c_id: tuple(
                    tf.py_func(__gen, [c_id], [tf.float32, tf.float32])))
            else:
                dd = dd.map(lambda c_id: tuple(
                    tf.py_func(__gen, [c_id], [tf.float32, tf.int32])))
            dd = dd.map(__set_shape)
            dd = dd.repeat()
            print('batch_processing output shape')
            print(dd.output_shapes)

            iter = dd.make_one_shot_iterator()
            input_image, input_label = iter.get_next()

            images = [[] for i in range(self.num_splits)]
            labels = [[] for i in range(self.num_splits)]

            # Create a list of size batch_size, each containing one image of the
            # batch. Without the unstack call, raw_images[i] would still access the
            # same image via a strided_slice op, but would be slower.
            raw_images = tf.unstack(input_image, axis=0)
            raw_labels = tf.unstack(input_label, axis=0)
            single_len = self.batch_size // self.num_splits
            split_index = -1
            for i in xrange(self.batch_size):
                if i % single_len == 0:
                    split_index += 1
                images[split_index].append(raw_images[i])
                labels[split_index].append(raw_labels[i])

            for split_index in xrange(self.num_splits):
                images[split_index] = tf.parallel_stack(images[split_index])
                labels[split_index] = tf.parallel_stack(labels[split_index])
            return images, labels
Example #18
def read_record_scale(filename_queue, reader, image_size, scale, crop=True):
    _, serialized_example = reader.read(filename_queue)

    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'image/height':
                                           tf.FixedLenFeature([], tf.int64),
                                           'image/width':
                                           tf.FixedLenFeature([], tf.int64),
                                           'image/filename':
                                           tf.FixedLenFeature([], tf.string),
                                           'image/knn/t1':
                                           tf.VarLenFeature(tf.int64),
                                           'image/knn/t1s':
                                           tf.VarLenFeature(tf.int64),
                                           'image/knn/t1L2':
                                           tf.VarLenFeature(tf.float32),
                                           'image/knn/t2':
                                           tf.VarLenFeature(tf.int64),
                                           'image/knn/t2s':
                                           tf.VarLenFeature(tf.int64),
                                           'image/knn/t2L2':
                                           tf.VarLenFeature(tf.float32),
                                           'image/knn/t3':
                                           tf.VarLenFeature(tf.int64),
                                           'image/knn/t3s':
                                           tf.VarLenFeature(tf.int64),
                                           'image/knn/t3L2':
                                           tf.VarLenFeature(tf.float32),
                                           'image/knn/t4':
                                           tf.VarLenFeature(tf.int64),
                                           'image/knn/t4s':
                                           tf.VarLenFeature(tf.int64),
                                           'image/knn/t4L2':
                                           tf.VarLenFeature(tf.float32),
                                           'image/encoded':
                                           tf.FixedLenFeature([], tf.string)
                                       })

    img_h = features['image/height']
    img_h = tf.cast(img_h, tf.int32)
    img_w = features['image/width']
    img_w = tf.cast(img_w, tf.int32)
    filename = features['image/filename']

    t1_10nn_ids = features['image/knn/t1']
    t1_10nn_subids = features['image/knn/t1s']
    t1_10nn_L2 = features['image/knn/t1L2']
    t2_10nn_ids = features['image/knn/t2']
    t2_10nn_subids = features['image/knn/t2s']
    t2_10nn_L2 = features['image/knn/t2L2']
    t3_10nn_ids = features['image/knn/t3']
    t3_10nn_subids = features['image/knn/t3s']
    t3_10nn_L2 = features['image/knn/t3L2']
    t4_10nn_ids = features['image/knn/t4']
    t4_10nn_subids = features['image/knn/t4s']
    t4_10nn_L2 = features['image/knn/t4L2']

    orig_image = features['image/encoded']

    oi1 = tf.image.decode_jpeg(orig_image)
    if crop:
        size = tf.minimum(img_h, img_w)
        if scale:
            size = tf.cast(tf.round(tf.divide(tf.multiply(size, scale), 10)),
                           tf.int32)
        size = tf.maximum(size, image_size)
        crop_shape = tf.parallel_stack([size, size, 3])
        image = tf.random_crop(oi1, crop_shape, seed=4285)
    else:
        image = oi1
    image = tf.image.resize_images(image, [image_size, image_size],
                                   method=tf.image.ResizeMethod.AREA)
    image = tf.reshape(image, (image_size, image_size, 3))
    image = tf.cast(image, tf.float32) * (2. / 255) - 1

    return filename, image, t1_10nn_ids, t1_10nn_subids, t1_10nn_L2, t2_10nn_ids, t2_10nn_subids, t2_10nn_L2, \
           t3_10nn_ids, t3_10nn_subids, t3_10nn_L2, t4_10nn_ids, t4_10nn_subids, t4_10nn_L2
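
The crop_shape construction above uses tf.parallel_stack to assemble a shape vector from dynamic scalars; a standalone sketch of the same pattern:

import tensorflow as tf

size = tf.constant(5)
crop_shape = tf.parallel_stack([size, size, 3])  # dynamic [size, size, 3]
image = tf.random_crop(tf.zeros([10, 10, 3]), crop_shape, seed=4285)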
Example #19
 def minibatch(self):
     """
     Returns minibatch of images and labels from TF records file.
     """
     with tf.name_scope('pipeline'):
         ds = tf.data.Dataset.from_generator(self.generator, (tf.int64),
                                             (tf.TensorShape([])))
         if self.mode == 'train':
             max_num_records = self.params['num_epochs'] * self.params[
                 'NUM_EXAMPLES_PER_EPOCH']
             ds = ds.take(max_num_records)
             ds = ds.prefetch(min(1, self.num_samples))
             ds = ds.batch(self.params['batch_size'], drop_remainder=True)
             #ds = ds.map(self.wrapped_decode, num_parallel_calls=tf.data.experimental.AUTOTUNE)
             ds = ds.map(self.wrapped_decode)
             iterator = ds.make_one_shot_iterator()
             images, labels = [], []
             for _ in range(self.params['batch_size']):
                 image, label = iterator.get_next()
                 image = tf.reshape(image, self.data_specs['image_shape'])
                 images.append(
                     tf.reshape(image, self.data_specs['image_shape']))
                 labels.append(
                     tf.reshape(label, self.data_specs['label_shape']))
         elif self.mode == 'eval':
             ds = ds.take(self.num_samples)
             ds = ds.batch(self.params['batch_size'], drop_remainder=True)
             ds = ds.map(self.wrapped_decode)
             iterator = ds.make_one_shot_iterator()
             images, labels = [], []
             if self.params[self.mode + '_distort']:
                 print('images will be distorted')
             for _ in range(self.params['batch_size']):
                 image, label = iterator.get_next()
                 image = tf.reshape(image, self.data_specs['image_shape'])
                 images.append(
                     tf.reshape(image, self.data_specs['image_shape']))
                 labels.append(
                     tf.reshape(label, self.data_specs['label_shape']))
         if tf.executing_eagerly():
             images = tf.stack(images)
             labels = tf.stack(labels)
         else:
             images = tf.parallel_stack(images)
             labels = tf.parallel_stack(labels)
         # reshape them to the expected shape:
         labels_newshape = [self.params['batch_size']
                            ] + self.data_specs['label_shape']
         images_newshape = [self.params['batch_size']
                            ] + self.data_specs['image_shape']
         labels = tf.reshape(labels, labels_newshape)
         images = tf.reshape(images, images_newshape)
         #labels = self.image_scaling(labels)
         images = self.image_scaling(images)
     # data augmentation
     if self.params[self.mode + '_distort']:
         with tf.device('/gpu:%i' % hvd.local_rank()):
             if self.params.get('random_crop', False):
                 images = tf.transpose(images, perm=[0, 2, 3, 1])
                 images = self.random_crop_resize(images)
                 images = self.add_noise_image(images)
                 images = tf.transpose(images, perm=[0, 3, 1, 2])
             else:
                 images = self.add_noise_image(images)
     return images, labels
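
The executing_eagerly branch above exists because, as the snippet's own guard implies, tf.parallel_stack only works when building a graph; a minimal restatement of the guard:

import tensorflow as tf

tensors = [tf.constant([1.0]), tf.constant([2.0])]
if tf.executing_eagerly():
    batch = tf.stack(tensors)            # eager fallback
else:
    batch = tf.parallel_stack(tensors)   # graph mode: parallel copies, static shapes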
Example #20
    def minibatch(self):
        """
        Returns minibatch of images and labels from TF records file.
        """
        mode = self.mode
        batch_size = self.params['batch_size']
        if mode not in ['train', 'validation', 'test']:
            mode = 'train'

        if self.debug: self.inspect_tfrecords(mode)

        record_input = data_flow_ops.RecordInput(
            file_pattern=os.path.join(self.params['data_dir'], '*.tfrecords'),
            parallelism=self.params['IO_threads'],
            buffer_size=self.params['buffer_cap'],
            batch_size=batch_size)
        records = record_input.get_yield_op()

        # Split batch into individual images
        records = tf.split(records, batch_size, 0)
        records = [tf.reshape(record, []) for record in records]
        #print('record contents %s' %(format(records)))
        #print('record length %s and contents %s' %(len(records),format(records)))
        # Deserialize and preprocess images into batches for each device
        images = []
        labels = []
        with tf.name_scope('input_pipeline'):
            if self.params[mode + '_distort']:
                print_rank('images will be distorted')

            for i, record in enumerate(records):
                image, label = self.decode_image_label(record)
                if self.params[mode + '_distort']:
                    # image = self.add_noise_image(image)
                    image = self.distort(image)
                images.append(image)
                labels.append(label)
                image_shape = image.get_shape().as_list()
                label_shape = label.get_shape().as_list()
            # Stack images and labels back into a single tensor
            labels = tf.parallel_stack(labels)
            images = tf.parallel_stack(images)

            # reshape them to the expected shape:
            labels_newshape = [batch_size] + label_shape
            images_newshape = [batch_size] + image_shape
            labels = tf.reshape(labels, labels_newshape)
            images = tf.reshape(images, images_newshape)

            # glimpse images: moved to GPU
            #images = self.get_glimpses(images)

            # Display the training images in the Tensorboard visualizer.
            if self.debug: tf.summary.image("images", images, max_outputs=4)

            # resize
            if self.params['resize']:
                images = tf.image.resize_bilinear(images, [
                    self.params['RESIZE_WIDTH'], self.params['RESIZE_HEIGHT']
                ])
            if self.params['tile']:
                images = tf.ones([
                    self.params['IMAGE_DEPTH'], self.params['IMAGE_HEIGHT'],
                    self.params['IMAGE_WIDTH']
                ],
                                 dtype=self.params['IMAGE_DTYPE'])
                labels = tf.ones([256, 512, 512],
                                 dtype=self.params['LABEL_DTYPE'])

        return images, labels
Example #21
    def get_loader_w3d(self):
        """
        Similar to get_loader, but outputs are:
          image_batch: batched images as per data_format
          label_batch: batched keypoint labels N x K x 3
          label3d_batch: batched keypoint labels N x (216 + 10 + 42)
                         216=24*3*3 pose, 10 shape, 42=14*3 3D joints
                         (3D datasets only have 14 joints annotated)
          has_gt3d_batch: batched indicator for
                          existence of [3D joints, 3D SMPL] labels N x 2 - bool
                          Note 3D SMPL is only available for H3.6M.


        The problem is that datasets without pose/shape do not have them
        in the tfrecords. There's no way to check for this in TF, so
        instead we make 2 string_input_producers, one for data without 3d
        and the other for data with 3d, and send [2 x *] to train.*batch.
        """
        datasets_no3d = [d for d in self.datasets if d not in _3D_DATASETS]
        datasets_yes3d = [d for d in self.datasets if d in _3D_DATASETS]

        files_no3d = data_utils.get_all_files(self.dataset_dir, datasets_no3d)
        files_yes3d = data_utils.get_all_files(self.dataset_dir,
                                               datasets_yes3d)

        # Make sure we have dataset with 3D.
        if len(files_yes3d) == 0:
            print("Dont run this without any datasets with gt 3d")
            import ipdb
            ipdb.set_trace()
            exit(1)

        do_shuffle = True

        fqueue_yes3d = tf.train.string_input_producer(files_yes3d,
                                                      shuffle=do_shuffle,
                                                      name="input_w3d")
        image, label, label3d, has_smpl3d = self.read_data(fqueue_yes3d,
                                                           has_3d=True)

        if len(files_no3d) != 0:
            fqueue_no3d = tf.train.string_input_producer(files_no3d,
                                                         shuffle=do_shuffle,
                                                         name="input_wout3d")
            image_no3d, label_no3d = self.read_data(fqueue_no3d, has_3d=False)
            label3d_no3d = tf.zeros_like(label3d)
            image = tf.parallel_stack([image, image_no3d])
            label = tf.parallel_stack([label, label_no3d])
            label3d = tf.parallel_stack([label3d, label3d_no3d])
            # 3D joint is always available for data with 3d.
            has_3d_joints = tf.constant([True, False], dtype=tf.bool)
            has_3d_smpl = tf.concat([has_smpl3d, [False]], axis=0)
        else:
            # If no "no3d" images, need to make them 1 x *
            image = tf.expand_dims(image, 0)
            label = tf.expand_dims(label, 0)
            label3d = tf.expand_dims(label3d, 0)
            has_3d_joints = tf.constant([True], dtype=tf.bool)
            has_3d_smpl = has_smpl3d

        # Combine 3D bools.
        # each is 2 x 1, column is [3d_joints, 3d_smpl]
        has_3dgt = tf.stack([has_3d_joints, has_3d_smpl], axis=1)

        min_after_dequeue = 2000
        capacity = min_after_dequeue + 3 * self.batch_size

        image_batch, label_batch, label3d_batch, bool_batch = tf.train.shuffle_batch(
            [image, label, label3d, has_3dgt],
            batch_size=self.batch_size,
            num_threads=8,
            capacity=capacity,
            min_after_dequeue=min_after_dequeue,
            enqueue_many=True,
            name='input_batch_train_3d')

        if self.data_format == 'NCHW':
            image_batch = tf.transpose(image_batch, [0, 3, 1, 2])
        elif self.data_format == 'NHWC':
            pass
        else:
            raise Exception("[!] Unkown data_format: {}".format(
                self.data_format))

        batch_dict = {
            'image': image_batch,
            'label': label_batch,
            'label3d': label3d_batch,
            'has3d': bool_batch,
        }

        return batch_dict
Example #22
  def minibatch(self, dataset, subset, use_datasets, shift_ratio=-1):
    if shift_ratio < 0:
      shift_ratio = self.shift_ratio
    with tf.name_scope('batch_processing'):
      # Build final results per split.
      images = [[] for _ in range(self.num_splits)]
      labels = [[] for _ in range(self.num_splits)]
      if use_datasets:
        glob_pattern = dataset.tf_record_pattern(subset)
        file_names = gfile.Glob(glob_pattern)
        if not file_names:
          raise ValueError('Found no files in --data_dir matching: {}'
                           .format(glob_pattern))
        ds = tf.contrib.data.TFRecordDataset(file_names)
        counter = tf.contrib.data.Dataset.range(self.batch_size)
        counter = counter.repeat()
        ds = tf.contrib.data.Dataset.zip((ds, counter))
        ds = ds.map(
            self.parse_and_preprocess,
            num_parallel_calls=self.batch_size)
        ds = ds.prefetch(buffer_size=self.batch_size)
        ds = ds.shuffle(buffer_size=10000)
        ds = ds.repeat()
        ds_iterator = ds.make_one_shot_iterator()
        # TODO(jsimsa): Use datasets' batch transformation instead of (see
        # below) once the transformation implements parallel data copy.
        #
        # NOTE: The current implementation does not preserve the order of
        # elements between the shuffle buffer and the batch.
        for idx in xrange(self.batch_size):
          label, image = ds_iterator.get_next()
          split_index = idx % self.num_splits
          labels[split_index].append(label)
          images[split_index].append(image)

      else:
        record_input = data_flow_ops.RecordInput(
            file_pattern=dataset.tf_record_pattern(subset),
            seed=301,
            parallelism=64,
            buffer_size=10000,
            batch_size=self.batch_size,
            shift_ratio=shift_ratio,
            name='record_input')
        records = record_input.get_yield_op()
        records = tf.split(records, self.batch_size, 0)
        records = [tf.reshape(record, []) for record in records]
        for idx in xrange(self.batch_size):
          value = records[idx]
          (label, image) = self.parse_and_preprocess(value, idx)
          split_index = idx % self.num_splits
          labels[split_index].append(label)
          images[split_index].append(image)

      for split_index in xrange(self.num_splits):
        images[split_index] = tf.parallel_stack(images[split_index])
        labels[split_index] = tf.concat(labels[split_index], 0)
        images[split_index] = tf.cast(images[split_index], self.dtype)
        depth = 3
        images[split_index] = tf.reshape(
            images[split_index],
            shape=[self.batch_size_per_split, self.height, self.width, depth])
        labels[split_index] = tf.reshape(labels[split_index],
                                         [self.batch_size_per_split])
      return images, labels
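
A minimal, self-contained sketch of the round-robin split + parallel_stack pattern used above; the shapes and counts here are made up for illustration.

import tensorflow as tf

batch_size, num_splits = 8, 2
examples = [tf.random_uniform([224, 224, 3]) for _ in range(batch_size)]

shards = [[] for _ in range(num_splits)]
for i, example in enumerate(examples):
    shards[i % num_splits].append(example)

# tf.parallel_stack copies each input into the output as soon as it is
# ready (tf.stack waits for all inputs), but requires fully known,
# identical static shapes.
shards = [tf.parallel_stack(s) for s in shards]  # each is [4, 224, 224, 3]
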
Example #23
def input_fn(subset, num_shards):
  """Create input graph for model.

  Args:
    subset: one of 'train', 'validate' and 'eval'.
    num_shards: num of towers participating in data-parallel training.
  Returns:
    two lists of tensors for features and labels, each of num_shards length.
  """
  dataset = cifar10.Cifar10DataSet(FLAGS.data_dir)
  is_training = (subset == 'train')
  if is_training:
    batch_size = FLAGS.train_batch_size
  else:
    batch_size = FLAGS.eval_batch_size
  with tf.device('/cpu:0'), tf.name_scope('batching'):
    # CPU loads all data from disk, since there are only 60k 32x32 RGB images.
    all_images, all_labels = dataset.read_all_data(subset)
    dataset = tf.contrib.data.Dataset.from_tensor_slices(
        (all_images, all_labels))
    dataset = dataset.map(
        lambda x, y: (tf.cast(x, tf.float32), tf.cast(y, tf.int32)),
        num_threads=2,
        output_buffer_size=batch_size)

    # Image preprocessing.
    def _preprocess(image, label):
      # If GPU is available, NHWC to NCHW transpose is done in ResNetCifar10
      # class, not included in preprocessing.
      return cifar10.Cifar10DataSet.preprocess(
          image, is_training, FLAGS.use_distortion_for_training), label
    dataset = dataset.map(
        _preprocess, num_threads=batch_size, output_buffer_size=2 * batch_size)
    # Repeat infinitely.
    dataset = dataset.repeat()
    if is_training:
      min_fraction_of_examples_in_queue = 0.4
      min_queue_examples = int(
          cifar10.Cifar10DataSet.num_examples_per_epoch(subset) *
          min_fraction_of_examples_in_queue)
      # Ensure that the capacity is sufficiently large to provide good random
      # shuffling
      dataset = dataset.shuffle(buffer_size=min_queue_examples + 3 * batch_size)
    dataset = dataset.batch(batch_size)
    iterator = dataset.make_one_shot_iterator()
    image_batch, label_batch = iterator.get_next()
    if num_shards <= 1:
      # No GPU available or only 1 GPU.
      return [image_batch], [label_batch]

    # Note that passing num=batch_size is safe here, even though
    # dataset.batch(batch_size) can, in some cases, return fewer than batch_size
    # examples. This is because it does so only when repeating for a limited
    # number of epochs, but our dataset repeats forever.
    image_batch = tf.unstack(image_batch, num=batch_size, axis=0)
    label_batch = tf.unstack(label_batch, num=batch_size, axis=0)
    feature_shards = [[] for i in range(num_shards)]
    label_shards = [[] for i in range(num_shards)]
    for i in xrange(batch_size):
      idx = i % num_shards
      feature_shards[idx].append(image_batch[i])
      label_shards[idx].append(label_batch[i])
    feature_shards = [tf.parallel_stack(x) for x in feature_shards]
    label_shards = [tf.parallel_stack(x) for x in label_shards]
    return feature_shards, label_shards
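
The shards returned above are typically consumed by one model tower per GPU. A hedged sketch, where tower_fn is a hypothetical per-tower loss-building function, not part of this example:

feature_shards, label_shards = input_fn('train', num_shards=2)
tower_losses = []
for i in range(2):
    with tf.device('/gpu:%d' % i), tf.variable_scope('model', reuse=(i > 0)):
        # `tower_fn` is assumed: builds the model on shard i, returns a loss.
        tower_losses.append(tower_fn(feature_shards[i], label_shards[i]))
loss = tf.reduce_mean(tf.stack(tower_losses))
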
Example #24
    def minibatch(self, dataset, subset, use_data_sets):
        with tf.name_scope('batch_processing'):
            images = [[] for _ in range(self.num_splits)]
            labels = [[] for _ in range(self.num_splits)]
            if use_data_sets:
                file_names = glob.glob(dataset.tf_record_pattern(subset))
                ds = tf.contrib.data.TFRecordDataset(file_names)
                counter = tf.contrib.data.Dataset.range(self.batch_size)
                counter = counter.repeat()
                ds = tf.contrib.data.Dataset.zip((ds, counter))
                ds = ds.map(self.parse_and_preprocess,
                            num_parallel_calls=self.batch_size,
                            output_buffer_size=self.batch_size)
                ds = ds.shuffle(buffer_size=10000)
                ds = ds.repeat()
                # Integer division so batch() receives an int on Python 3 too.
                ds = ds.batch(batch_size=(self.batch_size // self.num_splits))
                ds_iterator = ds.make_one_shot_iterator()

                for d in xrange(self.num_splits):
                    labels[d], images[d] = ds_iterator.get_next()

            else:
                # Build final results per split.
                record_input = data_flow_ops.RecordInput(
                    file_pattern=dataset.tf_record_pattern(subset),
                    seed=301,
                    parallelism=64,
                    buffer_size=10000,
                    batch_size=self.batch_size,
                    shift_ratio=self.shift_ratio,
                    name='record_input')
                records = record_input.get_yield_op()
                records = tf.split(records, self.batch_size, 0)
                records = [tf.reshape(record, []) for record in records]
                for idx in xrange(self.batch_size):
                    value = records[idx]
                    (label_index,
                     image) = self.parse_and_preprocess(value, idx)
                    split_index = idx % self.num_splits
                    images[split_index].append(image)
                    labels[split_index].append(label_index)

            label_index_batch = [None] * self.num_splits
            for split_index in xrange(self.num_splits):
                if use_data_sets:
                    label_index_batch[split_index] = labels[split_index]
                else:
                    images[split_index] = tf.parallel_stack(
                        images[split_index])
                    label_index_batch[split_index] = tf.concat(
                        labels[split_index], 0)
                images[split_index] = tf.cast(images[split_index], self.dtype)
                depth = 3
                images[split_index] = tf.reshape(images[split_index],
                                                 shape=[
                                                     self.batch_size_per_split,
                                                     self.height, self.width,
                                                     depth
                                                 ])
                label_index_batch[split_index] = tf.reshape(
                    label_index_batch[split_index],
                    [self.batch_size_per_split])

            return images, label_index_batch
Example #25
def read_tfrecord_and_decode_into_image_annotation_pair_tensors(
        tfrecord_filenames_queue):
    """Return image/annotation tensors that are created by reading tfrecord file.
    The function accepts tfrecord filenames queue as an input which is usually
    can be created using tf.train.string_input_producer() where filename
    is specified with desired number of epochs. This function takes queue
    produced by aforemention tf.train.string_input_producer() and defines
    tensors converted from raw binary representations into
    reshaped image/annotation tensors.
    Parameters
    ----------
    tfrecord_filenames_queue : tfrecord filename queue
        String queue object from tf.train.string_input_producer()
    
    Returns
    -------
    image, annotation : tuple of tf.int32 (image, annotation)
        Tuple of image/annotation tensors
    """

    reader = tf.TFRecordReader()

    _, serialized_example = reader.read(tfrecord_filenames_queue)

    features, sequence_features = tf.parse_single_sequence_example(
        serialized_example,
        context_features={
            'height': tf.FixedLenFeature([], tf.int64),
            'width': tf.FixedLenFeature([], tf.int64),
            'time_step': tf.FixedLenFeature([], tf.int64),
            'label_len': tf.FixedLenFeature([], tf.int64),
            'image_raw': tf.FixedLenFeature([], tf.string),
        },
        sequence_features={
            'label': tf.FixedLenSequenceFeature([], tf.int64),
        })

    image = tf.decode_raw(features['image_raw'], tf.uint8)

    height = tf.cast(features['height'], tf.int32)
    width = tf.cast(features['width'], tf.int32)
    label_len = tf.cast(features['label_len'], tf.int32)
    label = tf.cast(sequence_features['label'], tf.int32)
    label = tf.reshape(label, [cfg.MAX_CHAR_LEN])
    # tf.pack was the pre-1.0 name for tf.stack; tf.parallel_stack copies
    # each input into the output as soon as it is available.
    image_shape = tf.parallel_stack([height, width, 3])
    image = tf.reshape(image, image_shape)

    img_size = cfg.IMG_SHAPE  # (650, 50)
    time_step = tf.constant(cfg.TIME_STEP, tf.int32)

    if cfg.NCHANNELS == 1: image = tf.image.rgb_to_grayscale(image)
    image = tf.image.resize_images(image,
                                   size=(img_size[1], img_size[0]),
                                   method=tf.image.ResizeMethod.BILINEAR)
    image = tf.transpose(image, perm=[1, 0, 2])
    image = tf.cast(tf.reshape(image, [img_size[0], cfg.NUM_FEATURES]),
                    dtype=tf.float32) / 255.

    return image, label, label_len, time_step
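
A hedged usage sketch: the function above only defines graph ops, so values appear only after a filename queue exists and its queue runners are started. `tfrecord_filenames` is an assumed list of TFRecord paths.

filename_queue = tf.train.string_input_producer(tfrecord_filenames,
                                                shuffle=True)
image, label, label_len, time_step = \
    read_tfrecord_and_decode_into_image_annotation_pair_tensors(filename_queue)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    image_val, label_val = sess.run([image, label])  # one decoded example
    coord.request_stop()
    coord.join(threads)
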
Example #26
def build_reduce_sum(scaled_grads):
    stacked = tf.parallel_stack(values=scaled_grads)
    reduced = tf.reduce_sum(stacked, 0)
    return [reduced] * len(scaled_grads)
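
build_reduce_sum behaves like a simple all-reduce: every tower contributes one same-shaped tensor and all towers receive the identical sum. A small hedged sketch with synthetic gradients:

num_towers = 4
# Pre-scaling each gradient by 1/num_towers makes the reduced sum a mean.
scaled_grads = [tf.random_normal([10, 10]) / num_towers
                for _ in range(num_towers)]
reduced = build_reduce_sum(scaled_grads)  # four identical [10, 10] tensors
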
Example #27
    def minibatch(self,
                  dataset,
                  subset,
                  use_datasets,
                  cache_data,
                  shift_ratio=-1):
        if shift_ratio < 0:
            shift_ratio = self.shift_ratio
        with tf.compat.v1.name_scope('batch_processing'):
            # Build final results per split.
            images = [[] for _ in range(self.num_splits)]
            labels = [[] for _ in range(self.num_splits)]
            if use_datasets:
                glob_pattern = dataset.tf_record_pattern(subset)
                file_names = gfile.Glob(glob_pattern)
                if not file_names:
                    raise ValueError(
                        'Found no files in --data_dir matching: {}'.format(
                            glob_pattern))
                ds = tf.data.TFRecordDataset.list_files(file_names)
                ds = ds.apply(
                    parallel_interleave(
                        tf.data.TFRecordDataset, cycle_length=10))
                if cache_data:
                    ds = ds.take(1).cache().repeat()
                counter = tf.data.Dataset.range(self.batch_size)
                counter = counter.repeat()
                ds = tf.data.Dataset.zip((ds, counter))
                ds = ds.prefetch(buffer_size=self.batch_size)
                ds = ds.shuffle(buffer_size=10000)
                ds = ds.repeat()
                ds = ds.apply(
                    map_and_batch(
                        map_func=self.parse_and_preprocess,
                        batch_size=self.batch_size_per_split,
                        num_parallel_batches=self.num_splits))
                ds = ds.prefetch(buffer_size=self.num_splits)
                ds_iterator = tf.compat.v1.data.make_one_shot_iterator(ds)
                for d in xrange(self.num_splits):
                    labels[d], images[d] = ds_iterator.get_next()

            else:
                record_input = data_flow_ops.RecordInput(
                    file_pattern=dataset.tf_record_pattern(subset),
                    seed=301,
                    parallelism=64,
                    buffer_size=10000,
                    batch_size=self.batch_size,
                    shift_ratio=shift_ratio,
                    name='record_input')
                records = record_input.get_yield_op()
                records = tf.split(records, self.batch_size, 0)
                records = [tf.reshape(record, []) for record in records]
                for idx in xrange(self.batch_size):
                    value = records[idx]
                    (label, image) = self.parse_and_preprocess(value, idx)
                    split_index = idx % self.num_splits
                    labels[split_index].append(label)
                    images[split_index].append(image)

            for split_index in xrange(self.num_splits):
                if not use_datasets:
                    images[split_index] = tf.parallel_stack(
                        images[split_index])
                    labels[split_index] = tf.concat(labels[split_index], 0)
                images[split_index] = tf.cast(images[split_index], self.dtype)
                depth = 3
                images[split_index] = tf.reshape(images[split_index],
                                                 shape=[
                                                     self.batch_size_per_split,
                                                     self.height, self.width,
                                                     depth
                                                 ])
                labels[split_index] = tf.reshape(labels[split_index],
                                                 [self.batch_size_per_split])
            return images, labels
Example #28
    def k_full(self, input1, input2=None):
        """Iteratively build the full NNGP kernel."""
        input1 = self._input_layer_normalization(input1)
        if input2 is None:
            input2 = input1
        else:
            input2 = self._input_layer_normalization(input2)

        with tf.name_scope("k_full"):
            cov_init = tf.matmul(input1, input2,
                                 transpose_b=True) / input1.shape[1].value

            self.k_diag(input1)
            q_aa_init = self.layer_qaa_dict[0]

            q_ab = cov_init
            q_ab = self.weight_var * q_ab + self.bias_var
            corr = q_ab / q_aa_init[0]

            if FLAGS.fraction_of_int32 > 1:
                batch_size, batch_count = self._get_batch_size_and_count(
                    input1, input2)
                with tf.name_scope("q_ab"):
                    q_ab_all = []
                    for b_x in range(batch_count):
                        with tf.name_scope("batch_%d" % b_x):
                            corr_flat_batch = corr[batch_size *
                                                   b_x:batch_size *
                                                   (b_x + 1), :]
                            corr_flat_batch = tf.reshape(corr_flat_batch, [-1])

                            for l in range(self.depth):
                                with tf.name_scope("layer_%d" % l):
                                    q_aa = self.layer_qaa_dict[l]
                                    q_ab = interp.interp_lin_2d(
                                        x=self.var_aa_grid,
                                        y=self.corr_ab_grid,
                                        z=self.qab_grid,
                                        xp=q_aa,
                                        yp=corr_flat_batch)

                                    q_ab = self.weight_var * q_ab + self.bias_var
                                    corr_flat_batch = q_ab / self.layer_qaa_dict[
                                        l + 1][0]

                            q_ab_all.append(q_ab)

                    q_ab_all = tf.parallel_stack(q_ab_all)
            else:
                with tf.name_scope("q_ab"):
                    corr_flat = tf.reshape(corr, [-1])
                    for l in range(self.depth):
                        with tf.name_scope("layer_%d" % l):
                            q_aa = self.layer_qaa_dict[l]
                            q_ab = interp.interp_lin_2d(x=self.var_aa_grid,
                                                        y=self.corr_ab_grid,
                                                        z=self.qab_grid,
                                                        xp=q_aa,
                                                        yp=corr_flat)
                            q_ab = self.weight_var * q_ab + self.bias_var
                            corr_flat = q_ab / self.layer_qaa_dict[l + 1][0]
                    q_ab_all = q_ab

        return tf.reshape(q_ab_all, cov_init.shape, "qab")
Example #29
    def minibatch(self, dataset, subset, use_data_sets):
        with tf.name_scope('batch_processing'):
            images = [[] for i in range(self.device_count)]
            labels = [[] for i in range(self.device_count)]
            if use_data_sets:
                file_names = glob.glob(dataset.tf_record_pattern(subset))
                batch_size_per = self.batch_size // self.device_count  # int division
                num_threads = 10
                output_buffer_size = num_threads * 2000

                counter = tf.data.Dataset.range(sys.maxint)
                ds = tf.data.TFRecordDataset(file_names)
                ds = tf.data.Dataset.zip((ds, counter))
                ds = ds.map(self.parse_and_preprocess,
                            num_parallel_calls=num_threads).prefetch(
                                output_buffer_size)
                shuffle_buffer_size = 10000
                ds = ds.shuffle(shuffle_buffer_size)
                repeat_count = -1  # infinite repetition
                ds = ds.repeat(repeat_count)
                ds = ds.batch(batch_size_per)
                ds_iterator = ds.make_one_shot_iterator()

                for d in xrange(self.device_count):
                    labels[d], images[d] = ds_iterator.get_next()

            else:
                # Build final results per device.
                record_input = data_flow_ops.RecordInput(
                    file_pattern=dataset.tf_record_pattern(subset),
                    seed=301,
                    parallelism=64,
                    buffer_size=10000,
                    batch_size=self.batch_size,
                    shift_ratio=self.shift_ratio,
                    name='record_input')
                records = record_input.get_yield_op()
                records = tf.split(records, self.batch_size, 0)
                records = [tf.reshape(record, []) for record in records]
                for i in xrange(self.batch_size):
                    value = records[i]
                    (label_index,
                     image) = self.parse_and_preprocess(value, i % 4)
                    device_index = i % self.device_count
                    images[device_index].append(image)
                    labels[device_index].append(label_index)

            label_index_batch = [None] * self.device_count
            for device_index in xrange(self.device_count):
                if use_data_sets:
                    label_index_batch[device_index] = labels[device_index]
                else:
                    images[device_index] = tf.parallel_stack(
                        images[device_index])
                    label_index_batch[device_index] = tf.concat(
                        labels[device_index], 0)
                images[device_index] = tf.cast(images[device_index],
                                               self.dtype)
                depth = 3
                images[device_index] = tf.reshape(
                    images[device_index],
                    shape=[
                        self.batch_size_per_device, self.height, self.width,
                        depth
                    ])
                label_index_batch[device_index] = tf.reshape(
                    label_index_batch[device_index],
                    [self.batch_size_per_device])
                if FLAGS.summary_verbosity >= 2:
                    # Display the training images in the visualizer.
                    tf.summary.image('images', images[device_index])

            return images, label_index_batch
Example #30
    # Decode the record read by the reader
    features = tf.parse_example(exs, features=feature)

    #image = tf.decode_raw(features['image/encoded'], tf.uint8)
    image = features['image/encoded']
    name = features['image/filename']
    img_h = features['image/height']
    img_w = features['image/width']
    img_h = tf.cast(img_h, tf.int32)
    img_w = tf.cast(img_w, tf.int32)

    # TODO: try the same with batch_size = 4, i.e. a single rank-4 tensor.

    img = tf.image.decode_jpeg(image[0])
    img_shape = tf.parallel_stack([img_h[0], img_w[0], 3])
    image1 = tf.reshape(img, img_shape)
    img = tf.image.decode_jpeg(image[1])
    img_shape = tf.parallel_stack([img_h[1], img_w[1], 3])
    image2 = tf.reshape(img, img_shape)
    img = tf.image.decode_jpeg(image[2])
    img_shape = tf.parallel_stack([img_h[2], img_w[2], 3])
    image3 = tf.reshape(img, img_shape)
    img = tf.image.decode_jpeg(image[3])
    img_shape = tf.parallel_stack([img_h[3], img_w[3], 3])
    image4 = tf.reshape(img, img_shape)

    t1 = tf.image.crop_to_bounding_box(image1, 0, 0, 64, 64)
    t1 = tf.reshape(t1, (64,64,3))
    t2 = tf.image.crop_to_bounding_box(image2, 0, 0, 64, 64)
    t2 = tf.reshape(t2, (64,64,3))
Example #31
File: util.py  Project: meng-jia/wenzheng
 def stack(x):
   try:
     return tf.parallel_stack(x)
   except Exception:
     return tf.stack(x)
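
The fallback exists because tf.parallel_stack requires fully known, identical static shapes at graph-construction time, while tf.stack does not. A hedged illustration:

known = [tf.zeros([3, 3]) for _ in range(4)]
stacked = stack(known)    # tf.parallel_stack succeeds

unknown = [tf.placeholder(tf.float32, [None, 3]) for _ in range(4)]
stacked = stack(unknown)  # parallel_stack raises; falls back to tf.stack
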
Example #32
    def __init__(self, *args, **kwargs):
        super(DataFlow, self).__init__(*args, **kwargs)
        self.pattern = 'tf_records_train/train*'

        cpu_device = '/cpu:0'

        # Preprocessing
        with tf.device(cpu_device):
            file_pattern = os.path.join(self.data_dir, self.pattern)
            record_input = RecordInput(file_pattern=file_pattern,
                                       seed=Record_seed,
                                       parallelism=32,
                                       buffer_size=4000,
                                       batch_size=self.batch_size,
                                       shift_ratio=0,
                                       name='record_input')
            records = record_input.get_yield_op()
            records = tf.split(records, self.batch_size, 0)
            records = [tf.reshape(record, []) for record in records]
            images = []
            labels = []
            for idx in xrange(self.batch_size):
                value = records[idx]
                if self.with_labels:
                    image, label = self.parse_example_proto_and_process(value)
                    labels.append(label)
                else:
                    image = self.parse_example_proto_and_process(value)
                images.append(image)
            if self.with_labels:
                labels = tf.parallel_stack(labels, 0)
                labels = tf.reshape(labels, [self.batch_size])

            images = tf.parallel_stack(images)
            images = tf.reshape(images,
                                shape=[
                                    self.batch_size, self.output_size,
                                    self.output_size, self.c_dim
                                ])

            if self.format == 'NCHW':
                images = tf.transpose(images, [0, 3, 1, 2])
            images_shape = images.get_shape()
            if self.with_labels:
                labels_shape = labels.get_shape()
                image_producer_stage = StagingArea(
                    dtypes=[tf.float32, tf.int32],
                    shapes=[images_shape, labels_shape])
                image_producer_op = image_producer_stage.put([images, labels])
                image_producer_stage_get = image_producer_stage.get()
                images_and_labels = tf.tuple(
                    [image_producer_stage_get[0], image_producer_stage_get[1]],
                    control_inputs=[image_producer_op])
                images = images_and_labels[0]
                labels = images_and_labels[1]
            else:
                image_producer_stage = StagingArea(dtypes=[tf.float32],
                                                   shapes=[images_shape])
                image_producer_op = image_producer_stage.put([images])
                image_producer_stage_get = image_producer_stage.get()[0]
                images = tf.tuple([image_producer_stage_get],
                                  control_inputs=[image_producer_op])[0]

        self.images = images
        self.image_producer_op = image_producer_op
        if self.format == 'NCHW':
            self.shape = [self.c_dim, self.output_size, self.output_size]
        elif self.format == 'NHWC':
            self.shape = [self.output_size, self.output_size, self.c_dim]
        if self.with_labels:
            self.labels = labels
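
One way to drive the StagingArea built above is to keep it one batch ahead of the compute. A hedged sketch, assuming `flow` is a constructed DataFlow and `train_op` is a hypothetical training step that consumes flow.images:

num_steps = 1000  # hypothetical
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(flow.image_producer_op)  # warm-up: stage the first batch
    for _ in range(num_steps):
        # Each step consumes one staged batch while staging the next,
        # overlapping host-side input work with device compute.
        sess.run([train_op, flow.image_producer_op])
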
Example #33
    def get_loader_w3d(self):
        """
        Similar to get_loader, but outputs are:
          image_batch: batched images as per data_format
          label_batch: batched keypoint labels N x K x 3
          label3d_batch: batched keypoint labels N x (216 + 10 + 42)
                         216=24*3*3 pose, 10 shape, 42=14*3 3D joints
                         (3D datasets only have 14 joints annotated)
          has_gt3d_batch: batched indicator for
                          existence of [3D joints, 3D SMPL] labels N x 2 - bool
                          Note 3D SMPL is only available for H3.6M.


        Problem: datasets without pose/shape do not store them in their
        tfrecords, and there is no way to check for this in TF. So instead
        we make two string_input_producers, one for data without 3D and one
        for data with 3D, and send [2 x *] stacks to tf.train.shuffle_batch
        with enqueue_many=True.
        """
        datasets_no3d = [d for d in self.datasets if d not in _3D_DATASETS]
        datasets_yes3d = [d for d in self.datasets if d in _3D_DATASETS]

        files_no3d = data_utils.get_all_files(self.dataset_dir, datasets_no3d)
        files_yes3d = data_utils.get_all_files(self.dataset_dir,
                                               datasets_yes3d)

        # Make sure we have dataset with 3D.
        if len(files_yes3d) == 0:
            print("Dont run this without any datasets with gt 3d")
            import ipdb; ipdb.set_trace()
            exit(1)

        do_shuffle = True

        fqueue_yes3d = tf.train.string_input_producer(
            files_yes3d, shuffle=do_shuffle, name="input_w3d")
        image, label, label3d, has_smpl3d = self.read_data(
            fqueue_yes3d, has_3d=True)

        if len(files_no3d) != 0:
            fqueue_no3d = tf.train.string_input_producer(
                files_no3d, shuffle=do_shuffle, name="input_wout3d")
            image_no3d, label_no3d = self.read_data(fqueue_no3d, has_3d=False)
            label3d_no3d = tf.zeros_like(label3d)
            image = tf.parallel_stack([image, image_no3d])
            label = tf.parallel_stack([label, label_no3d])
            label3d = tf.parallel_stack([label3d, label3d_no3d])
            # 3D joint is always available for data with 3d.
            has_3d_joints = tf.constant([True, False], dtype=tf.bool)
            has_3d_smpl = tf.concat([has_smpl3d, [False]], axis=0)
        else:
            # If no "no3d" images, need to make them 1 x *
            image = tf.expand_dims(image, 0)
            label = tf.expand_dims(label, 0)
            label3d = tf.expand_dims(label3d, 0)
            has_3d_joints = tf.constant([True], dtype=tf.bool)
            has_3d_smpl = has_smpl3d

        # Combine 3D bools.
        # each is 2 x 1, column is [3d_joints, 3d_smpl]
        has_3dgt = tf.stack([has_3d_joints, has_3d_smpl], axis=1)

        min_after_dequeue = 2000
        capacity = min_after_dequeue + 3 * self.batch_size

        image_batch, label_batch, label3d_batch, bool_batch = tf.train.shuffle_batch(
            [image, label, label3d, has_3dgt],
            batch_size=self.batch_size,
            num_threads=8,
            capacity=capacity,
            min_after_dequeue=min_after_dequeue,
            enqueue_many=True,
            name='input_batch_train_3d')

        if self.data_format == 'NCHW':
            image_batch = tf.transpose(image_batch, [0, 3, 1, 2])
        elif self.data_format == 'NHWC':
            pass
        else:
            raise Exception("[!] Unkown data_format: {}".format(
                self.data_format))

        batch_dict = {
            'image': image_batch,
            'label': label_batch,
            'label3d': label3d_batch,
            'has3d': bool_batch,
        }

        return batch_dict
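
Downstream, the has3d indicators can gate the 3D losses so examples without ground truth contribute nothing. A hedged sketch; the per-example loss tensors are hypothetical placeholders:

has3d = batch_dict['has3d']                       # [N, 2] bool
has_3d_joints = tf.cast(has3d[:, 0], tf.float32)  # 1.0 where 3D joints exist
has_3d_smpl = tf.cast(has3d[:, 1], tf.float32)    # 1.0 where SMPL labels exist
joint_loss = tf.reduce_mean(has_3d_joints * per_example_joint_loss)
smpl_loss = tf.reduce_mean(has_3d_smpl * per_example_smpl_loss)
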
Example #34
def decode(serialized_example):
    # reader = tf.TFRecordReader()

    # _, serialized_example = reader.read(filename_queue)

    features = tf.parse_single_example(
        serialized_example,
        # Defaults are not specified since all keys are required.
        features={
            'height': tf.FixedLenFeature([], tf.int64),
            'width': tf.FixedLenFeature([], tf.int64),
            'depth': tf.FixedLenFeature([], tf.int64),
            'filename': tf.FixedLenFeature([], tf.string),
            'image_raw': tf.FixedLenFeature([], tf.string),
            'bbox_xc': tf.VarLenFeature(tf.float32),
            'bbox_yc': tf.VarLenFeature(tf.float32),
            'bbox_wid': tf.VarLenFeature(tf.float32),
            'bbox_hei': tf.VarLenFeature(tf.float32),
            'bbox_class': tf.VarLenFeature(tf.float32),
            'num_bbox': tf.FixedLenFeature([], tf.int64)
        })

    # Get meta data
    height = tf.cast(features['height'], tf.int32)
    width = tf.cast(features['width'], tf.int32)
    channels = tf.cast(features['depth'], tf.int32)
    n_bbox = tf.cast(features['num_bbox'], tf.int32)

    # Get data shapes
    image_shape = tf.parallel_stack([height, width, channels])
    bboxes_shape = tf.parallel_stack([n_bbox, 5])
    bbox_shape_sing = tf.parallel_stack([n_bbox, 1])

    # Get data
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image = tf.reshape(image, image_shape)
    image = tf.to_float(image)
    image = tf.divide(image, 255.0)

    # BBOX fields are parsed as sparse (VarLenFeature); convert them to dense.
    bbox_xc = tf.sparse_tensor_to_dense(features['bbox_xc'], default_value=0.0)
    #bbox_xc = tf.reshape(bbox_xc, bbox_shape_sing)
    bbox_yc = tf.sparse_tensor_to_dense(features['bbox_yc'], default_value=0.0)
    #bbox_yc = tf.reshape(bbox_yc, bbox_shape_sing)
    bbox_wid = tf.sparse_tensor_to_dense(features['bbox_wid'],
                                         default_value=0.0)
    #bbox_wid = tf.reshape(bbox_wid, bbox_shape_sing)
    bbox_hei = tf.sparse_tensor_to_dense(features['bbox_hei'],
                                         default_value=0.0)
    #bbox_hei = tf.reshape(bbox_hei, bbox_shape_sing)
    bbox_class = tf.sparse_tensor_to_dense(features['bbox_class'],
                                           default_value=0)
    #bbox_class = tf.reshape(bbox_class, bbox_shape_sing)

    bboxes = tf.stack((bbox_class, bbox_xc, bbox_yc, bbox_wid, bbox_hei),
                      axis=-1)

    # images, annotations = tf.data.Dataset.batch([image, bboxes], batch_size=2)
    # images, annotations = tf.train.shuffle_batch([image, bboxes], batch_size=2, capacity=30, num_threads=2, min_after_dequeue=10)

    return image, bboxes
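
decode() parses a single serialized Example, so with tf.data it is typically mapped over a TFRecordDataset. Because the number of boxes (and the image size) varies per record, plain batch() would fail on ragged shapes; padded_batch pads to the largest example. A hedged sketch with a hypothetical file name:

dataset = tf.data.TFRecordDataset(['train.tfrecord'])  # hypothetical path
dataset = dataset.map(decode, num_parallel_calls=4)
dataset = dataset.shuffle(buffer_size=1000).repeat()
dataset = dataset.padded_batch(
    2, padded_shapes=([None, None, 3], [None, 5]))
iterator = dataset.make_one_shot_iterator()
images, bboxes = iterator.get_next()
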