コード例 #1
0
ファイル: image_ops_test.py プロジェクト: vurte/tensorflow
 def testExisting(self):
     """Round-trip a checked-in JPEG through decode/encode and bound the error."""
     path = ('tensorflow/core/lib/jpeg/testdata/' 'jpeg_merge_test1.jpg')
     with self.test_session() as sess:
         raw = io_ops.read_file(path)
         decoded = image_ops.decode_jpeg(raw)
         round_trip = image_ops.decode_jpeg(image_ops.encode_jpeg(decoded))
         jpeg0, image0, image1 = sess.run([raw, decoded, round_trip])
         # Known on-disk size and pixel shape of the test image.
         self.assertEqual(len(jpeg0), 3771)
         self.assertEqual(image0.shape, (256, 128, 3))
         self.assertLess(self.averageError(image0, image1), 0.8)
コード例 #2
0
ファイル: image_ops_test.py プロジェクト: sarvex/tensorflow
 def testExisting(self):
     """Decode a real JPEG, re-encode it, and check the round trip stays close."""
     path = "tensorflow/core/lib/jpeg/testdata/" "jpeg_merge_test1.jpg"
     with self.test_session() as sess:
         jpeg_bytes = io_ops.read_file(path)
         first_decode = image_ops.decode_jpeg(jpeg_bytes)
         second_decode = image_ops.decode_jpeg(
             image_ops.encode_jpeg(first_decode))
         jpeg0, image0, image1 = sess.run(
             [jpeg_bytes, first_decode, second_decode])
         self.assertEqual(len(jpeg0), 3771)
         self.assertEqual(image0.shape, (256, 128, 3))
         self.assertLess(self.averageError(image0, image1), 0.8)
コード例 #3
0
    def _evalDecodeJpeg(self, image_name, parallelism, num_iters, tile=None):
        """Time `parallelism` concurrent DecodeJpeg ops over `num_iters` runs.

        TODO(tanmingxing): add decoding+cropping as well.

        Args:
          image_name: a string of image file name (without suffix).
          parallelism: the number of concurrent decode_jpeg ops to be run.
          num_iters: number of iterations for evaluation.
          tile: if not None, tile the image to composite a larger fake image.

        Returns:
          The duration of the run in seconds.
        """
        # Start from an empty graph so variable names like 'image_%s' from
        # earlier benchmark invocations do not collide with this one.
        ops.reset_default_graph()

        image_file_path = os.path.join(prefix_path, image_name)

        if tile is None:
            # Keep the raw JPEG bytes in a variable: file I/O happens once at
            # initialization and is excluded from the timed loop.
            image_content = variable_scope.get_variable(
                'image_%s' % image_name,
                initializer=io_ops.read_file(image_file_path))
        else:
            single_image = image_ops.decode_jpeg(
                io_ops.read_file(image_file_path),
                channels=3,
                name='single_image')
            # Tile the image to composite a new larger image.
            tiled_image = array_ops.tile(single_image, tile)
            image_content = variable_scope.get_variable(
                'tiled_image_%s' % image_name,
                initializer=image_ops.encode_jpeg(tiled_image))

        with session.Session() as sess:
            sess.run(variables.global_variables_initializer())
            images = []
            for i in xrange(parallelism):
                images.append(
                    image_ops.decode_jpeg(image_content,
                                          channels=3,
                                          name='image_%d' % (i)))

            # Group all decodes so a single run() executes every one of them.
            r = control_flow_ops.group(*images)

            for _ in xrange(3):
                # Skip warm up time.
                sess.run(r)

            start_time = time.time()
            for _ in xrange(num_iters):
                sess.run(r)
        # NOTE(review): the end timestamp is taken after the `with` block
        # exits, so session teardown is included in the reported duration.
        return time.time() - start_time
コード例 #4
0
  def _evalDecodeJpeg(self, image_name, parallelism, num_iters, tile=None):
    """Time `parallelism` concurrent DecodeJpeg ops over `num_iters` runs.

    TODO(tanmingxing): add decoding+cropping as well.

    Args:
      image_name: a string of image file name (without suffix).
      parallelism: the number of concurrent decode_jpeg ops to be run.
      num_iters: number of iterations for evaluation.
      tile: if not None, tile the image to composite a larger fake image.

    Returns:
      The duration of the run in seconds.
    """
    # Fresh graph per call so variable names ('image_%s', ...) from earlier
    # benchmark runs do not collide with this one.
    ops.reset_default_graph()

    image_file_path = os.path.join(prefix_path, image_name)

    if tile is None:
      # Store the raw JPEG bytes in a variable: the file read happens once at
      # initialization and is excluded from the timed loop.
      image_content = variable_scope.get_variable(
          'image_%s' % image_name,
          initializer=io_ops.read_file(image_file_path))
    else:
      single_image = image_ops.decode_jpeg(
          io_ops.read_file(image_file_path), channels=3, name='single_image')
      # Tile the image to composite a new larger image.
      tiled_image = array_ops.tile(single_image, tile)
      image_content = variable_scope.get_variable(
          'tiled_image_%s' % image_name,
          initializer=image_ops.encode_jpeg(tiled_image))

    with session.Session() as sess:
      sess.run(variables.global_variables_initializer())
      images = []
      for i in xrange(parallelism):
        images.append(
            image_ops.decode_jpeg(
                image_content, channels=3, name='image_%d' % (i)))

      # Group all decodes into one op so a single run() executes them all.
      r = control_flow_ops.group(*images)

      for _ in xrange(3):
        # Skip warm up time.
        sess.run(r)

      start_time = time.time()
      for _ in xrange(num_iters):
        sess.run(r)
    # NOTE(review): the second timestamp is taken after the session closes,
    # so teardown cost is included in the reported duration.
    return time.time() - start_time
コード例 #5
0
 def testCmyk(self):
     """A CMYK JPEG should decode to RGB with small error vs the RGB original."""
     base = "tensorflow/core/lib/jpeg/testdata"
     rgb_path = os.path.join(base, "jpeg_merge_test1.jpg")
     cmyk_path = os.path.join(base, "jpeg_merge_test1_cmyk.jpg")
     shape = 256, 128, 3
     # channels=3 forces RGB output; channels=0 lets the decoder choose.
     for channels in (3, 0):
         with self.test_session() as sess:
             rgb_op = image_ops.decode_jpeg(io_ops.read_file(rgb_path),
                                            channels=channels)
             cmyk_op = image_ops.decode_jpeg(io_ops.read_file(cmyk_path),
                                             channels=channels)
             rgb, cmyk = sess.run([rgb_op, cmyk_op])
             self.assertEqual(rgb.shape, shape)
             self.assertEqual(cmyk.shape, shape)
             self.assertLess(self.averageError(rgb, cmyk), 4)
コード例 #6
0
 def decode_jpeg():
     """Decodes a jpeg image with specified '_dct_method'."""
     # Decode with the configured channel count / DCT method, then cast the
     # result to the requested output dtype.
     decoded = image_ops.decode_jpeg(image_buffer,
                                     channels=self._channels,
                                     dct_method=self._dct_method)
     return math_ops.cast(decoded, self._dtype)
コード例 #7
0
 def testShape(self):
   """Static shape of decode_jpeg output is [None, None, channels or None]."""
   with self.test_session():
     data = constant_op.constant('nonsense')
     for ch in (0, 1, 3):
       # channels=0 means "decide at decode time", so the dim stays unknown.
       img = image_ops.decode_jpeg(data, channels=ch)
       self.assertEqual(img.get_shape().as_list(), [None, None, ch or None])
コード例 #8
0
 def decode_jpeg():
   """Decodes a jpeg image with specified '_dct_method'."""
   # Decode first, then convert to the caller-requested dtype.
   raw_image = image_ops.decode_jpeg(image_buffer,
                                     channels=self._channels,
                                     dct_method=self._dct_method)
   return math_ops.cast(raw_image, self._dtype)
コード例 #9
0
ファイル: pascal_voc_reader.py プロジェクト: Mooonside/SEGS
def decode(serialized_example):
    """Parse a serialized PASCAL-VOC Example with detection + segmentation labels.

    Args:
        serialized_example: scalar string tensor holding one tf.Example.

    Returns:
        The feature dict, with 'image/encoded' replaced by a decoded RGB
        image tensor and 'label/segmentation/encoded' by a decoded
        single-channel PNG tensor.
    """
    # Defaults are not specified since both keys are required.
    feature_spec = {
        'image/name': tf.FixedLenFeature([], tf.string),
        'image/height': tf.FixedLenFeature([1], tf.int64),
        'image/width': tf.FixedLenFeature([1], tf.int64),
        'image/channels': tf.FixedLenFeature([1], tf.int64),
        'image/shape': tf.FixedLenFeature([3], tf.int64),
        'image/format': tf.FixedLenFeature([], tf.string),
        'image/encoded': tf.FixedLenFeature([], tf.string),
        'label/segmentation/format': tf.FixedLenFeature([], tf.string),
        'label/segmentation/encoded': tf.FixedLenFeature([], tf.string),
        'label/object/bbox/xmin': tf.VarLenFeature(tf.float32),
        'label/object/bbox/ymin': tf.VarLenFeature(tf.float32),
        'label/object/bbox/xmax': tf.VarLenFeature(tf.float32),
        'label/object/bbox/ymax': tf.VarLenFeature(tf.float32),
        'label/object/bbox/label': tf.VarLenFeature(tf.int64),
        'label/object/bbox/difficult': tf.VarLenFeature(tf.int64),
        'label/object/bbox/truncated': tf.VarLenFeature(tf.int64)
    }
    features = tf.parse_single_example(serialized_example,
                                       features=feature_spec)

    # Replace the raw byte strings with decoded image tensors in place.
    features['image/encoded'] = image_ops.decode_jpeg(
        features['image/encoded'], channels=3)
    features['label/segmentation/encoded'] = image_ops.decode_png(
        features['label/segmentation/encoded'], channels=1)

    return features
コード例 #10
0
def decode(serialized_example):
    """Parse an Example holding an image plus per-class masks/bboxes/classes.

    Args:
        serialized_example: scalar string tensor with one serialized Example.

    Returns:
        The feature dict with the image decoded and the raw label byte
        strings reshaped: masks -> [H, W, C] int64, bboxes -> [C, 4]
        float64, classes -> [C, 1] int64.
    """
    # Defaults are not specified since both keys are required.
    spec = {
        'image/name': tf.FixedLenFeature([], tf.string),
        'image/height': tf.FixedLenFeature([], tf.int64),
        'image/width': tf.FixedLenFeature([], tf.int64),
        'image/channels': tf.FixedLenFeature([], tf.int64),
        'image/format': tf.FixedLenFeature([], tf.string),
        'image/encoded': tf.FixedLenFeature([], tf.string),
        'label/num_classes': tf.FixedLenFeature([], tf.int64),
        'label/masks': tf.FixedLenFeature([], tf.string),
        'label/bboxes': tf.FixedLenFeature([], tf.string),
        'label/classes': tf.FixedLenFeature([], tf.string)
    }
    features = tf.parse_single_example(serialized_example, features=spec)

    features['image/encoded'] = image_ops.decode_jpeg(
        features['image/encoded'], channels=3)

    # Raw label payloads were serialized as float64 bytes.
    masks = tf.decode_raw(features['label/masks'], tf.float64)
    bboxes = tf.decode_raw(features['label/bboxes'], tf.float64)
    classes = tf.decode_raw(features['label/classes'], tf.float64)

    height = tf.cast(features['image/height'], tf.int32)
    width = tf.cast(features['image/width'], tf.int32)
    num_classes = tf.cast(features['label/num_classes'], tf.int32)

    features['label/masks'] = tf.cast(
        tf.reshape(masks, [height, width, num_classes]), tf.int64)
    features['label/bboxes'] = tf.cast(
        tf.reshape(bboxes, [num_classes, 4]), tf.float64)
    features['label/classes'] = tf.cast(
        tf.reshape(classes, [num_classes, 1]), tf.int64)

    return features
コード例 #11
0
ファイル: image_ops_test.py プロジェクト: vurte/tensorflow
 def testShape(self):
     """Without running the op, decode_jpeg reports [None, None, channels]."""
     with self.test_session():
         contents = constant_op.constant('nonsense')
         for num_channels in (0, 1, 3):
             decoded = image_ops.decode_jpeg(contents, channels=num_channels)
             # channels=0 defers the decision to runtime -> unknown dim.
             self.assertEqual(decoded.get_shape().as_list(),
                              [None, None, num_channels or None])
コード例 #12
0
ファイル: image_ops_test.py プロジェクト: vurte/tensorflow
 def testCmyk(self):
     """CMYK input decodes to RGB close to the reference RGB encoding."""
     base = 'tensorflow/core/lib/jpeg/testdata'
     rgb_path = os.path.join(base, 'jpeg_merge_test1.jpg')
     cmyk_path = os.path.join(base, 'jpeg_merge_test1_cmyk.jpg')
     shape = 256, 128, 3
     # Exercise both explicit RGB (3) and auto-detected (0) channel counts.
     for channels in (3, 0):
         with self.test_session() as sess:
             decode_pair = [
                 image_ops.decode_jpeg(io_ops.read_file(rgb_path),
                                       channels=channels),
                 image_ops.decode_jpeg(io_ops.read_file(cmyk_path),
                                       channels=channels),
             ]
             rgb, cmyk = sess.run(decode_pair)
             self.assertEqual(rgb.shape, shape)
             self.assertEqual(cmyk.shape, shape)
             self.assertLess(self.averageError(rgb, cmyk), 4)
コード例 #13
0
  def testSynthetic(self):
    """Encode/decode a synthetic ramp twice and check near-fixpoint behavior."""
    with self.test_session() as sess:
      ramp = constant_op.constant(_SimpleColorRamp())
      encoded = image_ops.encode_jpeg(ramp)
      decoded_once = image_ops.decode_jpeg(encoded)
      decoded_twice = image_ops.decode_jpeg(image_ops.encode_jpeg(decoded_once))
      jpeg0, image0, image1, image2 = sess.run(
          [encoded, ramp, decoded_once, decoded_twice])

      # The decoded-encoded image should be similar to the input.
      self.assertLess(self.averageError(image0, image1), 0.6)

      # We should be very close to a fixpoint.
      self.assertLess(self.averageError(image1, image2), 0.02)

      # Smooth ramps compress well (input size is 153600).
      self.assertGreaterEqual(len(jpeg0), 5000)
      self.assertLessEqual(len(jpeg0), 6000)
コード例 #14
0
  def testSynthetic(self):
    """JPEG round trips on a smooth color ramp converge quickly to a fixpoint."""
    with self.test_session() as sess:
      # Encode a synthetic image, decode it, then encode/decode once more.
      source = constant_op.constant(_SimpleColorRamp())
      first_jpeg = image_ops.encode_jpeg(source)
      once = image_ops.decode_jpeg(first_jpeg)
      twice = image_ops.decode_jpeg(image_ops.encode_jpeg(once))
      jpeg0, image0, image1, image2 = sess.run(
          [first_jpeg, source, once, twice])

      # One encode/decode round trip stays close to the input.
      self.assertLess(self.averageError(image0, image1), 0.6)

      # A second round trip is nearly a fixpoint.
      self.assertLess(self.averageError(image1, image2), 0.02)

      # Smooth ramps compress well (input size is 153600).
      self.assertGreaterEqual(len(jpeg0), 5000)
      self.assertLessEqual(len(jpeg0), 6000)
コード例 #15
0
 def testJpeg(self):
   """decode_image on a real JPEG agrees exactly with decode_jpeg."""
   path = os.path.join(prefix_path, "jpeg", "testdata", "jpeg_merge_test1.jpg")
   with self.test_session(use_gpu=True) as sess:
     raw = io_ops.read_file(path)
     generic = image_ops.decode_image(raw)
     explicit = image_ops.decode_jpeg(raw)
     jpeg0, image0, image1 = sess.run([raw, generic, explicit])
     # Known size/shape of the checked-in test image.
     self.assertEqual(len(jpeg0), 3771)
     self.assertEqual(image0.shape, (256, 128, 3))
     self.assertAllEqual(image0, image1)
コード例 #16
0
 def testJpeg(self):
   """The generic decode_image path yields the same pixels as decode_jpeg."""
   path = os.path.join(prefix_path, "jpeg", "testdata", "jpeg_merge_test1.jpg")
   with self.test_session(use_gpu=True) as sess:
     jpeg_data = io_ops.read_file(path)
     from_decode_image = image_ops.decode_image(jpeg_data)
     from_decode_jpeg = image_ops.decode_jpeg(jpeg_data)
     jpeg0, image0, image1 = sess.run(
         [jpeg_data, from_decode_image, from_decode_jpeg])
     self.assertEqual(len(jpeg0), 3771)
     self.assertEqual(image0.shape, (256, 128, 3))
     self.assertAllEqual(image0, image1)
コード例 #17
0
def extract(features):
    """Pull (name, decoded image, label, bboxes) from a parsed feature dict.

    Args:
        features: dict of tensors produced by an Example parser.

    Returns:
        Tuple of (filename, RGB image tensor, label, [N, 4] bbox tensor
        stacked as ymin/xmin/ymax/xmax along the last axis).
    """
    image = image_ops.decode_jpeg(features['image/encoded'], channels=3)
    label = features['image/class/label']
    name = features['image/filename']

    # Densify the four sparse coordinate vectors, then stack to [N, 4].
    ymin = tf.sparse_tensor_to_dense(features['image/object/bbox/ymin'])
    xmin = tf.sparse_tensor_to_dense(features['image/object/bbox/xmin'])
    ymax = tf.sparse_tensor_to_dense(features['image/object/bbox/ymax'])
    xmax = tf.sparse_tensor_to_dense(features['image/object/bbox/xmax'])
    bboxes = tf.stack([ymin, xmin, ymax, xmax], axis=-1)

    return name, image, label, bboxes
コード例 #18
0
  def testJpeg(self):
    """decode_image equals decode_jpeg on a real file; channels=4 must fail."""
    path = os.path.join(prefix_path, "jpeg", "testdata", "jpeg_merge_test1.jpg")
    with self.session():
      raw = io_ops.read_file(path)
      decoded_generic = image_ops.decode_image(raw)
      decoded_jpeg = image_ops.decode_jpeg(raw)
      jpeg0, image0, image1 = self.evaluate(
          [raw, decoded_generic, decoded_jpeg])
      self.assertEqual(len(jpeg0), 3771)
      self.assertEqual(image0.shape, (256, 128, 3))
      self.assertAllEqual(image0, image1)

      # JPEG has no alpha channel, so asking for 4 channels is an error.
      with self.assertRaises(errors_impl.InvalidArgumentError):
        bad_channels = image_ops.decode_image(jpeg0, channels=4)
        self.evaluate(bad_channels)
コード例 #19
0
  def testJpeg(self):
    """decode_image matches decode_jpeg; requesting 4 channels raises."""
    path = os.path.join(prefix_path, "jpeg", "testdata", "jpeg_merge_test1.jpg")
    with self.session(use_gpu=True):
      raw = io_ops.read_file(path)
      generic = image_ops.decode_image(raw)
      explicit = image_ops.decode_jpeg(raw)
      jpeg0, image0, image1 = self.evaluate([raw, generic, explicit])
      self.assertEqual(len(jpeg0), 3771)
      self.assertEqual(image0.shape, (256, 128, 3))
      self.assertAllEqual(image0, image1)

      # Building the op succeeds; the error surfaces only on evaluation.
      bad_channels = image_ops.decode_image(jpeg0, channels=4)
      with self.assertRaises(errors_impl.InvalidArgumentError):
        self.evaluate(bad_channels)
コード例 #20
0
ファイル: pascal_voc_reader.py プロジェクト: jiabaocui/SEGS
def decode(serialized_example):
    """Parse a serialized PASCAL-VOC Example and decode image + label PNG.

    Args:
        serialized_example: scalar string tensor holding one tf.Example.

    Returns:
        The feature dict with 'image/encoded' decoded to an RGB image tensor
        and 'label/encoded' decoded to a single-channel PNG tensor.
    """
    # Defaults are not specified since both keys are required.
    keys_to_features = {
        'image/name': tf.FixedLenFeature([], tf.string),
        'image/height': tf.FixedLenFeature([1], tf.int64),
        'image/width': tf.FixedLenFeature([1], tf.int64),
        'image/channels': tf.FixedLenFeature([1], tf.int64),
        'image/shape': tf.FixedLenFeature([3], tf.int64),
        'image/format': tf.FixedLenFeature([], tf.string),
        'image/encoded': tf.FixedLenFeature([], tf.string),
        'label/format': tf.FixedLenFeature([], tf.string),
        'label/encoded': tf.FixedLenFeature([], tf.string)
    }
    features = tf.parse_single_example(serialized_example,
                                       features=keys_to_features)

    # Decode the raw byte strings in place.
    features['image/encoded'] = image_ops.decode_jpeg(
        features['image/encoded'], channels=3)
    features['label/encoded'] = image_ops.decode_png(
        features['label/encoded'], channels=1)
    return features
コード例 #21
0
ファイル: COCO_reader_stuff.py プロジェクト: Mooonside/SEGS
def decode(serialized_example):
    """Parse a COCO-stuff Example; decode the JPEG and reshape the raw label.

    Args:
        serialized_example: scalar string tensor with one serialized Example.

    Returns:
        The feature dict with 'image/encoded' decoded to RGB and
        'label/encoded' turned into an [H, W, 1] int64 tensor.
    """
    # Defaults are not specified since both keys are required.
    spec = {
        'image/name': tf.FixedLenFeature([], tf.string),
        'image/encoded': tf.FixedLenFeature([], tf.string),
        'image/height': tf.FixedLenFeature([], tf.int64),
        'image/width': tf.FixedLenFeature([], tf.int64),
        'label/encoded': tf.FixedLenFeature([], tf.string)
    }
    features = tf.parse_single_example(serialized_example, features=spec)

    features['image/encoded'] = image_ops.decode_jpeg(
        features['image/encoded'], channels=3)

    # Label arrives as raw float64 bytes; reshape to [H, W, 1], store int64.
    height = tf.cast(features['image/height'], tf.int32)
    width = tf.cast(features['image/width'], tf.int32)
    label = tf.decode_raw(features['label/encoded'], tf.float64)
    label = tf.reshape(label, [height, width, 1])
    features['label/encoded'] = tf.cast(label, tf.int64)
    return features
コード例 #22
0
def decode_image(image_buffer):
    """Decode a serialized JPEG string tensor into a 3-channel image tensor."""
    return image_ops.decode_jpeg(image_buffer, channels=3)
コード例 #23
0
ファイル: input_data.py プロジェクト: fgr1986/coco_mcu
 def decode_jpeg():
     """Decode the closed-over `image_buffer` to a uint8 RGB tensor."""
     decoded = image_ops.decode_jpeg(image_buffer, channels=3)
     # decode_jpeg yields uint8 already; the cast makes the dtype explicit.
     return tf.cast(decoded, tf.uint8)
コード例 #24
0
 def decode_jpg():
     """Decode `image_buffer` as JPEG with the decoder's channel count."""
     channels = self._channels
     return image_ops.decode_jpeg(image_buffer, channels)
コード例 #25
0
ファイル: tfexample_decoder.py プロジェクト: LUTAN/tensorflow
 def decode_jpg():
   """Return `image_buffer` decoded as JPEG; only tf.uint8 output is allowed."""
   # JPEG decoding only produces uint8 tensors, so reject any other dtype.
   if self._dtype != dtypes.uint8:
     raise ValueError(
         'jpeg decoder can only be used to decode to tf.uint8 but %s was '
         'requested for a jpeg image.' % self._dtype)
   return image_ops.decode_jpeg(image_buffer, self._channels)
コード例 #26
0
 def decode_jpg():
   """Decode `image_buffer` as JPEG, guarding against non-uint8 requests."""
   if self._dtype != dtypes.uint8:
     # The JPEG op has no dtype parameter; anything but uint8 is a misuse.
     raise ValueError(
         'jpeg decoder can only be used to decode to tf.uint8 but %s was '
         'requested for a jpeg image.' % self._dtype)
   return image_ops.decode_jpeg(image_buffer, self._channels)
コード例 #27
0
  def _evalDecodeJpeg(self,
                      image_name,
                      parallelism,
                      num_iters,
                      crop_during_decode=None,
                      crop_window=None,
                      tile=None):
    """Benchmark JPEG decoding (optionally fused with cropping) for one image.

    Builds `parallelism` decode ops over the same in-memory JPEG buffer,
    groups them into a single op, and times `num_iters` executions after
    three warm-up runs.

    Args:
      image_name: a string of image file name (without suffix).
      parallelism: the number of concurrent decode_jpeg ops to be run.
      num_iters: number of iterations for evaluation.
      crop_during_decode: If true, use fused DecodeAndCropJpeg instead of
          separate decode and crop ops. It is ignored if crop_window is None.
      crop_window: if not None, crop the decoded image. Depending on
          crop_during_decode, cropping could happen during or after decoding.
      tile: if not None, tile the image to composite a larger fake image.

    Returns:
      The duration of the run in seconds.
    """
    # Fresh graph per call so variable names like 'image_%s' don't collide
    # across repeated benchmark invocations.
    ops.reset_default_graph()

    image_file_path = os.path.join(prefix_path, image_name)

    if tile is None:
      # Keep the raw JPEG bytes in a variable so the file read happens once,
      # at initialization, outside the timed loop.
      image_content = variable_scope.get_variable(
          'image_%s' % image_name,
          initializer=io_ops.read_file(image_file_path))
    else:
      single_image = image_ops.decode_jpeg(
          io_ops.read_file(image_file_path), channels=3, name='single_image')
      # Tile the image to composite a new larger image.
      tiled_image = array_ops.tile(single_image, tile)
      image_content = variable_scope.get_variable(
          'tiled_image_%s' % image_name,
          initializer=image_ops.encode_jpeg(tiled_image))

    with session.Session() as sess:
      sess.run(variables.global_variables_initializer())
      images = []
      for _ in xrange(parallelism):
        if crop_window is None:
          # No crop.
          image = image_ops.decode_jpeg(image_content, channels=3)
        elif crop_during_decode:
          # combined decode and crop.
          image = image_ops.decode_and_crop_jpeg(
              image_content, crop_window, channels=3)
        else:
          # separate decode and crop.
          image = image_ops.decode_jpeg(image_content, channels=3)
          image = image_ops.crop_to_bounding_box(
              image,
              offset_height=crop_window[0],
              offset_width=crop_window[1],
              target_height=crop_window[2],
              target_width=crop_window[3])

        images.append(image)
      # One grouped op runs all `parallelism` decodes per session.run call.
      r = control_flow_ops.group(*images)

      for _ in xrange(3):
        # Skip warm up time.
        sess.run(r)

      start_time = time.time()
      for _ in xrange(num_iters):
        sess.run(r)
    # NOTE(review): the end timestamp is taken after the `with` block exits,
    # so session teardown is included in the reported duration.
    return time.time() - start_time
コード例 #28
0
    def _evalDecodeJpeg(self,
                        image_name,
                        parallelism,
                        num_iters,
                        crop_during_decode=None,
                        crop_window=None,
                        tile=None):
        """Benchmark JPEG decoding (optionally with crop) for a test image.

        Builds `parallelism` decode ops over one in-memory JPEG buffer,
        groups them into a single op, and times `num_iters` executions after
        three warm-up runs.

        Args:
            image_name: a string of image file name (without suffix).
            parallelism: the number of concurrent decode_jpeg ops to be run.
            num_iters: number of iterations for evaluation.
            crop_during_decode: If true, use fused DecodeAndCropJpeg instead
                of separate decode and crop ops. It is ignored if
                crop_window is None.
            crop_window: if not None, crop the decoded image. Depending on
                crop_during_decode, cropping could happen during or after
                decoding.
            tile: if not None, tile the image to composite a larger fake
                image.

        Returns:
            The duration of the run in seconds. The end timestamp is taken
            inside the `with` block, so session teardown is excluded.
        """
        # Fresh graph per call so variable names like 'image_%s' don't
        # collide across repeated benchmark invocations.
        ops.reset_default_graph()

        image_file_path = resource_loader.get_path_to_datafile(
            os.path.join('core', 'lib', 'jpeg', 'testdata', image_name))

        if tile is None:
            # Keep the raw JPEG bytes in a variable so the file read happens
            # once, at initialization, outside the timed loop.
            image_content = variable_scope.get_variable(
                'image_%s' % image_name,
                initializer=io_ops.read_file(image_file_path))
        else:
            single_image = image_ops.decode_jpeg(
                io_ops.read_file(image_file_path),
                channels=3,
                name='single_image')
            # Tile the image to composite a new larger image.
            tiled_image = array_ops.tile(single_image, tile)
            image_content = variable_scope.get_variable(
                'tiled_image_%s' % image_name,
                initializer=image_ops.encode_jpeg(tiled_image))

        # NOTE(review): `sess` is not referenced directly; evaluation goes
        # through self.evaluate — confirm it targets this session's graph.
        with session.Session() as sess:
            self.evaluate(variables.global_variables_initializer())
            images = []
            for _ in xrange(parallelism):
                if crop_window is None:
                    # No crop.
                    image = image_ops.decode_jpeg(image_content, channels=3)
                elif crop_during_decode:
                    # combined decode and crop.
                    image = image_ops.decode_and_crop_jpeg(image_content,
                                                           crop_window,
                                                           channels=3)
                else:
                    # separate decode and crop.
                    image = image_ops.decode_jpeg(image_content, channels=3)
                    image = image_ops.crop_to_bounding_box(
                        image,
                        offset_height=crop_window[0],
                        offset_width=crop_window[1],
                        target_height=crop_window[2],
                        target_width=crop_window[3])

                images.append(image)
            # One grouped op evaluates all decodes per run.
            r = control_flow_ops.group(*images)

            for _ in xrange(3):
                # Skip warm up time.
                self.evaluate(r)

            start_time = time.time()
            for _ in xrange(num_iters):
                self.evaluate(r)
            end_time = time.time()
        return end_time - start_time
コード例 #29
0
 def DecodeJpg():
     """Decode the closed-over `image_buffer` into a 3-channel image tensor."""
     return image_ops.decode_jpeg(image_buffer, channels=3)
コード例 #30
0
 def DecodeJpg():
   """Decode `image_buffer` as a JPEG with 3 color channels."""
   decoded = image_ops.decode_jpeg(image_buffer, 3)
   return decoded
コード例 #31
0
 def decode_jpg():
   """Decode `image_buffer` as JPEG with the configured channel count."""
   num_channels = self._channels
   return image_ops.decode_jpeg(image_buffer, num_channels)
コード例 #32
0
 def decode_jpeg():
     """Decode the closed-over `image_buffer` as JPEG.

     Uses the enclosing scope's `num_channels` and the decoder's
     `_dct_method` setting.
     """
     return image_ops.decode_jpeg(image_buffer,
                                  channels=num_channels,
                                  dct_method=self._dct_method)