Code Example #1
File: test_util.py  Project: yashk2810/benchmarks
def _worker_batches_in_numpy_array(numpy_inputs, batch_size, shift_ratio):
  """Yields batches from a numpy array, for a single worker."""
  numpy_inputs = cnn_util.roll_numpy_batches(numpy_inputs, batch_size,
                                             shift_ratio)
  i = 0
  total_batches = numpy_inputs.shape[0]
  assert total_batches % batch_size == 0
  while True:
    yield numpy_inputs[i:i + batch_size, ...]
    i = (i + batch_size) % total_batches
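
A minimal usage sketch for the generator above (illustrative only: it assumes the function and cnn_util from the same benchmarks project are importable, and the dummy array and batch size are made up for the example):

import numpy as np

# 8 examples of 4 features each; shape[0] must be divisible by batch_size.
fake_inputs = np.arange(8 * 4, dtype=np.float32).reshape(8, 4)
batches = _worker_batches_in_numpy_array(fake_inputs, batch_size=2,
                                         shift_ratio=0)
# The generator cycles forever, so only take as many batches as needed.
for _ in range(5):
  batch = next(batches)
  print(batch.shape)  # (2, 4) every time; wraps around after 4 batches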
Code Example #2
  def minibatch(self,
                dataset,
                subset,
                use_datasets,
                datasets_repeat_cached_sample,
                shift_ratio=0):
    """Get test image batches."""
    del dataset, use_datasets, datasets_repeat_cached_sample
    if (not hasattr(self, 'fake_images') or
        not hasattr(self, 'fake_labels')):
      raise ValueError('Must call set_fake_data() before calling minibatch '
                       'on TestImagePreprocessor')
    if self.expected_subset is not None:
      assert subset == self.expected_subset

    shift_ratio = shift_ratio or self.shift_ratio
    fake_images = cnn_util.roll_numpy_batches(self.fake_images, self.batch_size,
                                              shift_ratio)
    fake_labels = cnn_util.roll_numpy_batches(self.fake_labels, self.batch_size,
                                              shift_ratio)

    with tf.name_scope('batch_processing'):
      image_slice, label_slice = tf.train.slice_input_producer(
          [fake_images, fake_labels],
          shuffle=False,
          name='image_slice')
      raw_images, raw_labels = tf.train.batch(
          [image_slice, label_slice], batch_size=self.batch_size,
          name='image_batch')
      images = [[] for _ in range(self.num_splits)]
      labels = [[] for _ in range(self.num_splits)]
      for i in xrange(self.batch_size):
        split_index = i % self.num_splits
        raw_image = tf.cast(raw_images[i], self.dtype)
        images[split_index].append(raw_image)
        labels[split_index].append(raw_labels[i])
      for split_index in xrange(self.num_splits):
        images[split_index] = tf.parallel_stack(images[split_index])
        labels[split_index] = tf.parallel_stack(labels[split_index])

      normalized = [normalized_image(part) for part in images]
      return [[tf.cast(part, self.dtype) for part in normalized], labels]
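
The per-split interleaving in the loop over xrange(self.batch_size) can be illustrated in plain numpy. The round_robin_split helper below is hypothetical (not part of the benchmarks project); it only shows that element i of a batch lands in split i % num_splits:

import numpy as np

def round_robin_split(batch, num_splits):
  """Distributes batch elements across num_splits in round-robin order."""
  splits = [[] for _ in range(num_splits)]
  for i in range(batch.shape[0]):
    splits[i % num_splits].append(batch[i])
  return [np.stack(part) for part in splits]

batch = np.arange(8)
print(round_robin_split(batch, num_splits=2))
# [array([0, 2, 4, 6]), array([1, 3, 5, 7])]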