def test_video_ffmpeg(self):
  """Decodes an mkv file through ffmpeg and compares to reference frames.

  The expected frames are stored as a JSON array next to the video file.
  Exercises two input forms accepted by `features.Video`: a file path and
  an already-open file object.

  Fix: the original issued the identical file-path `assertFeature` call
  twice in a row; the duplicate added no coverage and is removed.
  """
  video_path = os.path.join(self._test_data_path, 'video.mkv')
  video_json_path = os.path.join(self._test_data_path, 'video.json')
  with tf.io.gfile.GFile(video_json_path) as fp:
    video_array = np.asarray(json.load(fp))

  # Decoding from a file path.
  self.assertFeature(
      feature=features.Video(shape=(5, 4, 2, 3)),
      shape=(5, 4, 2, 3),
      dtype=tf.uint8,
      tests=[
          testing.FeatureExpectationItem(
              value=video_path,
              expected=video_array,
          ),
      ],
  )

  class GFileWithSeekOnRead(tf.io.gfile.GFile):
    """GFile that rewinds to the start of the file after every read.

    Allows the same open file object to be consumed multiple times
    without the caller having to seek back between reads.
    """

    def read(self, *args, **kwargs):
      data_read = super(GFileWithSeekOnRead, self).read(*args, **kwargs)
      self.seek(0)
      return data_read

  # Decoding from an open (rewinding) file object.
  with GFileWithSeekOnRead(video_path, 'rb') as video_fp:
    self.assertFeature(
        feature=features.Video(shape=(5, 4, 2, 3)),
        shape=(5, 4, 2, 3),
        dtype=tf.uint8,
        tests=[
            testing.FeatureExpectationItem(
                value=video_fp,
                expected=video_array,
            ),
        ],
    )
def test_video_concatenated_frames(self):
  """Checks that `features.Video` accepts a list of per-frame image paths.

  The expected value is built by decoding each jpeg frame independently
  and stacking the results along a new leading (time) axis.
  """
  video_shape = (None, 400, 640, 3)
  lsun_examples_path = os.path.join(self._test_data_path, 'lsun_examples')
  frames_paths = []
  for frame_id in (1, 2, 3, 4):
    frames_paths.append(
        os.path.join(lsun_examples_path, '{}.jpg'.format(frame_id)))

  # Reference video: decode every frame and stack them into (T, H, W, C).
  decoded_frames = []
  for frame_path in frames_paths:
    with tf.io.gfile.GFile(frame_path, 'rb') as frame_fp:
      decoded_frames.append(
          tf.image.decode_jpeg(frame_fp.read(), channels=3))
  video = tf.stack(decoded_frames)

  self.assertFeature(
      feature=features.Video(shape=video_shape),
      shape=video_shape,
      dtype=tf.uint8,
      tests=[
          # Numpy array
          testing.FeatureExpectationItem(
              value=frames_paths,
              expected=video,
          ),
      ],
  )
def test_video_numpy(self):
  """An already-decoded uint8 numpy array should round-trip unchanged."""
  video_shape = (None, 64, 64, 3)
  random_video = np.random.randint(256, size=(128, 64, 64, 3), dtype=np.uint8)
  self.assertFeature(
      feature=features.Video(shape=video_shape),
      shape=video_shape,
      dtype=tf.uint8,
      tests=[
          testing.FeatureExpectationItem(
              value=random_video,
              expected=random_video,
          ),
      ],
  )
def test_video_numpy_with_attributes(self):
  """Checks numpy-array round-trip plus the feature's encoding attributes.

  Fix: this method was previously also named `test_video_numpy`, so it
  silently shadowed the earlier definition of the same name and only one
  of the two tests ever ran. Renaming restores both to the test run.
  """
  np_video = np.random.randint(256, size=(128, 64, 64, 3), dtype=np.uint8)
  self.assertFeature(
      feature=features.Video(shape=(None, 64, 64, 3)),
      shape=(None, 64, 64, 3),
      dtype=tf.uint8,
      tests=[
          testing.FeatureExpectationItem(
              value=np_video,
              expected=np_video,
          ),
      ],
      # Internal attributes expected on the Video feature: frames encoded
      # as png, with no extra ffmpeg arguments.
      test_attributes=dict(_encoding_format='png', _extra_ffmpeg_args=[]))
def test_video(self):
  """A random uint8 numpy video should be returned exactly as given."""
  raw_video = np.random.randint(256, size=(128, 64, 64, 3), dtype=np.uint8)
  expected_shape = (None, 64, 64, 3)
  self.assertFeature(
      feature=features.Video(shape=expected_shape),
      shape=expected_shape,
      dtype=tf.uint8,
      tests=[
          # Numpy array
          test_utils.FeatureExpectationItem(
              value=raw_video,
              expected=raw_video,
          ),
          # File path (Gif)
          # File path (.mp4)
      ],
  )
def test_video_custom_decode(self):
  """With `SkipDecoding`, frames come back as raw serialized bytes.

  Covers two layouts: a standalone Video feature, and an Image nested
  inside a `FeaturesDict` where only the image decoding is skipped.
  """
  image_path = os.fspath(
      utils.tfds_path('testing/test_data/test_image.jpg'))
  with tf.io.gfile.GFile(image_path, 'rb') as f:
    serialized_img = f.read()

  num_frames = 15
  self.assertFeature(
      # Video with statically defined frame shape.
      feature=features_lib.Video(shape=(None, 30, 60, 3)),
      shape=(None, 30, 60, 3),
      dtype=tf.uint8,
      tests=[
          testing.FeatureExpectationItem(
              value=[image_path] * num_frames,  # 15 frames of video
              expected=[serialized_img] * num_frames,  # Non-decoded image
              shape=(num_frames,),
              dtype=tf.string,  # Only string are decoded
              decoders=decode_lib.SkipDecoding(),
          ),
      ],
  )

  # Test with FeatureDict
  dict_feature = features_lib.FeaturesDict({
      'image':
          features_lib.Image(shape=(30, 60, 3), encoding_format='jpeg'),
      'label':
          tf.int64,
  })
  self.assertFeature(
      feature=dict_feature,
      shape={'image': (30, 60, 3), 'label': ()},
      dtype={'image': tf.uint8, 'label': tf.int64},
      tests=[
          testing.FeatureExpectationItem(
              # Skip decoding only for the nested image.
              decoders={'image': decode_lib.SkipDecoding()},
              value={'image': image_path, 'label': 123},
              expected={'image': serialized_img, 'label': 123},
              shape={'image': (), 'label': ()},
              dtype={'image': tf.string, 'label': tf.int64},
          ),
      ],
  )