def repeated_sequence_headers(codec_features):
    """
    **Tests the decoder can handle a stream with repeated sequence headers.**

    This test case consists of a sequence containing two frames in which the
    sequence header is repeated before every picture.

    This test will be omitted if the VC-2 level prohibits the repetition of
    the sequence header.
    """
    try:
        # Generate a base sequence in which we'll replace the sequence headers
        # later. We include at least two pictures so that pictures and
        # sequence headers are interleaved.
        sequence = make_sequence(
            codec_features,
            repeat_pictures(
                static_sprite(
                    codec_features["video_parameters"],
                    codec_features["picture_coding_mode"],
                ),
                2,
            ),
            # Force an extra sequence header between every data unit
            "(sequence_header .)+",
        )
    except IncompatibleLevelAndDataUnitError:
        # Do not try to force levels which don't support this degree of
        # sequence header interleaving to accept it.
        return None

    return Stream(sequences=[sequence])
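# A minimal sketch (not part of the test suite) of how the interleaving
# produced by the "(sequence_header .)+" pattern above could be verified,
# assuming the pattern expands into (sequence_header, <other>) pairs. The
# helper name is hypothetical; ``ParseCodes`` is assumed to be the
# vc2_data_tables enumeration used elsewhere in this module.
def _check_headers_interleaved(sequence):
    parse_codes = [
        data_unit["parse_info"]["parse_code"]
        for data_unit in sequence["data_units"]
    ]
    # Data units alternate (sequence_header, <anything>), so every data unit
    # at an even index should be a sequence header.
    return all(
        code == ParseCodes.sequence_header for code in parse_codes[0::2]
    )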
def absent_next_parse_offset(codec_features):
    """
    **Tests handling of missing 'next parse offset' field.**

    The 'next parse offset' field of the ``parse_info`` header (see (10.5.1))
    can be set to zero (i.e. omitted) for pictures. This test case verifies
    that decoders are still able to decode streams with this field absent.
    """
    sequence = make_sequence(
        codec_features,
        repeat_pictures(
            mid_gray(
                codec_features["video_parameters"],
                codec_features["picture_coding_mode"],
            ),
            2,
        ),
    )

    # Zero the next parse offset of picture-containing data units, preventing
    # the automatic computation of this field during serialisation
    for data_unit in sequence["data_units"]:
        parse_info = data_unit["parse_info"]
        if parse_info["parse_code"] in (
            ParseCodes.low_delay_picture,
            ParseCodes.high_quality_picture,
            ParseCodes.low_delay_picture_fragment,
            ParseCodes.high_quality_picture_fragment,
        ):
            parse_info["next_parse_offset"] = 0

    return Stream(sequences=[sequence])
def test_iter_transform_parameters_in_sequence(profile, fragment_slice_count):
    codec_features = MINIMAL_CODEC_FEATURES.copy()
    codec_features["profile"] = profile
    codec_features["fragment_slice_count"] = fragment_slice_count
    codec_features["slices_x"] = 3
    codec_features["slices_y"] = 2
    codec_features["picture_bytes"] = 100

    num_pictures = 2
    sequence = make_sequence(
        codec_features,
        repeat_pictures(
            mid_gray(
                codec_features["video_parameters"],
                codec_features["picture_coding_mode"],
            ),
            num_pictures,
        ),
    )

    transform_parameters = list(
        iter_transform_parameters_in_sequence(codec_features, sequence)
    )

    # Should have found one set of transform parameters per picture
    assert len(transform_parameters) == num_pictures
def picture_numbers(codec_features):
    """
    **Tests picture numbers are correctly read from the bitstream.**

    Each test case contains 8 blank pictures numbered in a particular way.

    ``picture_numbers[start_at_zero]``
        The first picture has picture number 0.

    ``picture_numbers[non_zero_start]``
        The first picture has picture number 1000.

    ``picture_numbers[wrap_around]``
        The first picture has picture number 4294967292, with the picture
        numbers wrapping around to 0 on the 5th picture in the sequence.

    ``picture_numbers[odd_first_picture]``
        The first picture has picture number 7. This test case is only
        included when the picture coding mode is 0 (i.e. pictures are frames)
        since the first field of each frame must have an even number when the
        picture coding mode is 1 (i.e. pictures are fields) (11.5).
    """
    # Create a sequence with at least 8 pictures (and 4 frames)
    mid_gray_pictures = list(
        mid_gray(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        )
    )
    mid_gray_pictures = list(
        repeat_pictures(
            mid_gray_pictures,
            8 // len(mid_gray_pictures),
        )
    )

    test_cases = [
        ("start_at_zero", [0, 1, 2, 3, 4, 5, 6, 7]),
        ("non_zero_start", [1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007]),
        (
            "wrap_around",
            [4294967292, 4294967293, 4294967294, 4294967295, 0, 1, 2, 3],
        ),
    ]

    if codec_features["picture_coding_mode"] == PictureCodingModes.pictures_are_frames:
        test_cases.append(("odd_first_picture", [7, 8, 9, 10, 11, 12, 13, 14]))

    for description, picture_numbers in test_cases:
        yield TestCase(
            Stream(
                sequences=[
                    make_sequence(
                        codec_features,
                        [
                            dict(picture, pic_num=pic_num)
                            for picture, pic_num in zip(
                                mid_gray_pictures, picture_numbers
                            )
                        ],
                    )
                ]
            ),
            description,
        )
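# Hedged illustration (not part of the test suite) of the 32-bit wrap-around
# exercised by the ``wrap_around`` case above: picture numbers are unsigned
# 32-bit values (11.5), so incrementing past 4294967295 wraps to 0. The
# helper name below is hypothetical.
def _next_picture_number(pic_num):
    # Picture numbers occupy 32 bits, so increment modulo 2**32
    return (pic_num + 1) % (2**32)


assert _next_picture_number(4294967295) == 0
assert _next_picture_number(4294967292) == 4294967293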
def test_iter_slices_in_sequence(profile, fragment_slice_count):
    codec_features = MINIMAL_CODEC_FEATURES.copy()
    codec_features["profile"] = profile
    codec_features["fragment_slice_count"] = fragment_slice_count
    codec_features["slices_x"] = 3
    codec_features["slices_y"] = 2
    codec_features["picture_bytes"] = 100

    num_pictures = 2
    sequence = make_sequence(
        codec_features,
        repeat_pictures(
            mid_gray(
                codec_features["video_parameters"],
                codec_features["picture_coding_mode"],
            ),
            num_pictures,
        ),
    )

    slices = list(iter_slices_in_sequence(codec_features, sequence))

    # Should have found every slice
    assert len(slices) == (
        codec_features["slices_x"] * codec_features["slices_y"] * num_pictures
    )

    # Should have correct states
    if profile == Profiles.high_quality:
        for state, _, _, _ in slices:
            assert state == State(
                slice_prefix_bytes=0,
                slice_size_scaler=1,
                slices_x=codec_features["slices_x"],
                slices_y=codec_features["slices_y"],
            )
    elif profile == Profiles.low_delay:
        slice_bytes = Fraction(
            codec_features["picture_bytes"],
            codec_features["slices_x"] * codec_features["slices_y"],
        )
        for state, _, _, _ in slices:
            assert state == State(
                slice_bytes_numerator=slice_bytes.numerator,
                slice_bytes_denominator=slice_bytes.denominator,
                slices_x=codec_features["slices_x"],
                slices_y=codec_features["slices_y"],
            )

    # Should have correct coordinates (raster order within each picture)
    it = iter(slices)
    for _ in range(num_pictures):
        for exp_sy in range(codec_features["slices_y"]):
            for exp_sx in range(codec_features["slices_x"]):
                _, sx, sy, _ = next(it)
                assert exp_sx == sx
                assert exp_sy == sy
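# Worked example (illustrative only) of the low delay slice size arithmetic
# checked above: with picture_bytes=100 and a 3x2 slice grid, each slice is
# allotted 100/6 bytes, carried exactly as the fraction 50/3 rather than
# being rounded.
from fractions import Fraction

_slice_bytes = Fraction(100, 3 * 2)
assert (_slice_bytes.numerator, _slice_bytes.denominator) == (50, 3)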
def test_multiple_repeats(self, pictures):
    repeated = list(repeat_pictures(iter(pictures), 3))

    assert len(repeated) == len(pictures) * 3

    # Check repeated pictures match the originals, in cycling order
    for orig, new in zip(cycle(pictures), repeated):
        assert orig["Y"] == new["Y"]
        assert orig["C1"] == new["C1"]
        assert orig["C2"] == new["C2"]

    # Check picture numbers are consecutive
    assert [p["pic_num"] for p in repeated] == list(range(len(repeated)))
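# A minimal sketch (assuming only the behaviour asserted above, not the
# library's actual implementation) of what repeat_pictures does: it cycles
# through the input pictures ``count`` times while assigning consecutive
# picture numbers starting from the first picture's original number. The
# helper name is hypothetical.
def _sketch_repeat_pictures(pictures, count):
    pictures = list(pictures)
    start = pictures[0]["pic_num"] if pictures else 0
    for i in range(count * len(pictures)):
        # Copy each picture so the renumbering doesn't mutate the originals
        picture = dict(pictures[i % len(pictures)])
        picture["pic_num"] = start + i
        yield picture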
def test_iter_slice_parameters_in_sequence(fragment_slice_count):
    codec_features = MINIMAL_CODEC_FEATURES.copy()
    codec_features["profile"] = Profiles.high_quality
    codec_features["fragment_slice_count"] = fragment_slice_count

    num_pictures = 2
    sequence = make_sequence(
        codec_features,
        repeat_pictures(
            mid_gray(
                codec_features["video_parameters"],
                codec_features["picture_coding_mode"],
            ),
            num_pictures,
        ),
    )

    slice_parameters = list(iter_slice_parameters_in_sequence(sequence))

    # Should have found every set of slice parameters
    assert len(slice_parameters) == num_pictures
def test_minimum_qindex(kwargs, exp_qis):
    codec_features = MINIMAL_CODEC_FEATURES.copy()
    pictures = list(
        repeat_pictures(
            mid_gray(
                codec_features["video_parameters"],
                codec_features["picture_coding_mode"],
            ),
            3,
        )
    )
    assert len(pictures) == 3

    seq = make_sequence(codec_features, pictures, **kwargs)

    # Collect the quantisation index of the first HQ slice of each picture
    qis = []
    for data_unit in seq["data_units"]:
        if "picture_parse" in data_unit:
            tx_data = data_unit["picture_parse"]["wavelet_transform"][
                "transform_data"
            ]
            qis.append(tx_data["hq_slices"][0]["qindex"])

    assert qis == exp_qis
def signal_range(codec_features):
    """
    **Tests that an encoder has sufficient numerical dynamic range.**

    These test cases contain test patterns designed to produce extreme
    signals within encoders. During these test cases, no integer clamping or
    overflows must occur.

    A test case is produced for each picture component:

    ``signal_range[Y]``
        Luma component test patterns.

    ``signal_range[C1]``
        Color difference 1 component test patterns.

    ``signal_range[C2]``
        Color difference 2 component test patterns.

    Though the test patterns produce near worst case signal levels, they are
    not guaranteed to produce the largest values possible.

    .. note::

        For informational purposes, an example of a set of test patterns is
        shown below:

        .. image:: /_static/user_guide/signal_range_encoder.png

    An informative metadata file is provided alongside each test case which
    gives, for each picture in the bitstream, the parts of an encoder which
    are being tested by the test patterns. See
    :py:class:`vc2_bit_widths.helpers.TestPoint` for details.
    """
    try:
        (
            analysis_luma_pictures,
            synthesis_luma_pictures,
            analysis_color_diff_pictures,
            synthesis_color_diff_pictures,
        ) = get_test_pictures(codec_features)
    except MissingStaticAnalysisError:
        logging.warning(
            (
                "No static analysis available for the wavelet "
                "used by codec '%s'. Signal range test cases cannot "
                "be generated."
            ),
            codec_features["name"],
        )
        return

    for component, analysis_test_pictures in [
        ("Y", analysis_luma_pictures),
        ("C1", analysis_color_diff_pictures),
        ("C2", analysis_color_diff_pictures),
    ]:
        # Generate enough mid-gray frames to hold all of the test patterns
        one_gray_frame = list(
            mid_gray(
                codec_features["video_parameters"],
                codec_features["picture_coding_mode"],
            )
        )
        pictures = list(
            repeat_pictures(
                one_gray_frame,
                (
                    (len(analysis_test_pictures) + len(one_gray_frame) - 1)
                    // len(one_gray_frame)
                ),
            )
        )

        # Fill in the test patterns
        for test_picture, picture in zip(analysis_test_pictures, pictures):
            picture[component] = test_picture.picture.tolist()

        # Extract the test points in JSON-serialisable form
        metadata = [
            [tp._asdict() for tp in p.test_points]
            for p in analysis_test_pictures
        ]

        out = EncoderTestSequence(
            pictures=pictures,
            video_parameters=codec_features["video_parameters"],
            picture_coding_mode=codec_features["picture_coding_mode"],
        )

        yield TestCase(out, component, metadata=metadata)
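# The repeat count above is a ceiling division: it produces just enough
# whole gray frames to hold every test picture. For example, 5 test
# pictures spread over 2-picture frames require ceil(5/2) = 3 repeats
# (6 pictures in total).
assert (5 + 2 - 1) // 2 == 3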
def padding_data(codec_features):
    """
    **Tests that the contents of padding data units are ignored.**

    This test case consists of a sequence containing two blank frames in
    which every other data unit is a padding data unit (10.4.5) of various
    lengths and contents (described below).

    ``padding_data[empty]``
        Padding data units containing zero padding bytes (i.e. just
        consisting of a parse info header).

    ``padding_data[zero]``
        Padding data units containing 32 bytes set to 0x00.

    ``padding_data[non_zero]``
        Padding data units whose 32 bytes contain the ASCII encoding of the
        text ``Ignore this padding data please!``.

    ``padding_data[dummy_end_of_sequence]``
        Padding data units whose 32 bytes contain an encoding of an end of
        sequence data unit (10.4.1).

    Where padding data units are not permitted by the VC-2 level in use,
    these test cases are omitted.
    """
    # Generate a base sequence in which we'll modify the padding data units.
    # We ensure there are always at least two pictures in the sequence to
    # make premature termination obvious.
    try:
        base_sequence = make_sequence(
            codec_features,
            repeat_pictures(
                mid_gray(
                    codec_features["video_parameters"],
                    codec_features["picture_coding_mode"],
                ),
                2,
            ),
            # Insert padding data between every data unit
            "sequence_header (padding_data .)* padding_data end_of_sequence $",
        )
    except IncompatibleLevelAndDataUnitError:
        # Padding is not allowed by the VC-2 level in use so just skip this
        # test
        return

    for description, data in [
        ("empty", b""),
        ("zero", b"\x00" * 32),
        ("non_zero", b"Ignore this padding data please!"),
        ("dummy_end_of_sequence", make_dummy_end_of_sequence().ljust(32, b"\x00")),
    ]:
        yield TestCase(
            Stream(sequences=[replace_padding_data(base_sequence, data)]),
            description,
        )
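# Quick illustration (not part of the test suite) of the byte padding used
# by the ``dummy_end_of_sequence`` case above: bytes.ljust pads the encoded
# end of sequence data unit with NUL bytes up to a total of 32 bytes. The
# four bytes shown are the VC-2 parse info prefix (0x42424344, "BBCD").
assert b"BBCD".ljust(8, b"\x00") == b"BBCD\x00\x00\x00\x00"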
class TestGenericPictureGeneratorBehaviour(object):
    @pytest.fixture(
        params=[
            real_pictures,
            moving_sprite,
            static_sprite,
            mid_gray,
            white_noise,
            linear_ramps,
            lambda *a, **kw: repeat_pictures(mid_gray(*a, **kw), 2),
        ]
    )
    def picture_generator(
        self,
        request,
        replace_real_pictures_with_test_pictures,  # noqa: F811
    ):
        return request.param

    @pytest.fixture
    def vp(self):
        return VideoParameters(
            frame_width=16,
            frame_height=8,
            frame_rate_numer=1,
            frame_rate_denom=1,
            pixel_aspect_ratio_numer=1,
            pixel_aspect_ratio_denom=1,
            source_sampling=SourceSamplingModes.progressive,
            top_field_first=True,
            color_diff_format_index=ColorDifferenceSamplingFormats.color_4_4_4,
            color_primaries_index=PresetColorPrimaries.hdtv,
            color_matrix_index=PresetColorMatrices.hdtv,
            transfer_function_index=PresetTransferFunctions.tv_gamma,
            luma_offset=0,
            luma_excursion=255,
            color_diff_offset=128,
            color_diff_excursion=255,
        )

    @pytest.mark.parametrize("ssm", SourceSamplingModes)
    @pytest.mark.parametrize("pcm", PictureCodingModes)
    def test_produces_correct_dict_type(self, picture_generator, vp, ssm, pcm):
        vp["source_sampling"] = ssm

        int_types = (int,)
        if sys.version_info < (3, 0, 0):
            int_types += (long,)  # noqa: F821

        for picture in list(picture_generator(vp, pcm)):
            assert isinstance(picture["Y"], list)
            assert isinstance(picture["Y"][0], list)
            assert type(picture["Y"][0][0]) in int_types

            assert isinstance(picture["C1"], list)
            assert isinstance(picture["C1"][0], list)
            assert type(picture["C1"][0][0]) in int_types

            assert isinstance(picture["C2"], list)
            assert isinstance(picture["C2"][0], list)
            assert type(picture["C2"][0][0]) in int_types

            assert isinstance(picture["pic_num"], int)

    @pytest.mark.parametrize("ssm", SourceSamplingModes)
    @pytest.mark.parametrize("pcm", PictureCodingModes)
    def test_produces_whole_number_of_frames(self, picture_generator, vp, ssm, pcm):
        vp["source_sampling"] = ssm
        pictures = list(picture_generator(vp, pcm))

        if pcm == PictureCodingModes.pictures_are_frames:
            assert len(pictures) >= 1
        else:
            # Must have an even number of fields (i.e. whole frames)
            assert len(pictures) >= 2
            assert (len(pictures) % 2) == 0

            # Even/odd fields must have even/odd picture numbers
            for i, picture in enumerate(pictures):
                assert (picture["pic_num"] % 2) == (i % 2)

        # Picture numbers must be consecutive
        last_num = pictures[0]["pic_num"]
        for picture in pictures[1:]:
            assert picture["pic_num"] == last_num + 1
            last_num += 1

    @pytest.mark.parametrize("ssm", SourceSamplingModes)
    @pytest.mark.parametrize("pcm", PictureCodingModes)
    @pytest.mark.parametrize("cds", ColorDifferenceSamplingFormats)
    def test_produces_correct_picture_sizes(self, picture_generator, vp, ssm, pcm, cds):
        vp["source_sampling"] = ssm
        vp["color_diff_format_index"] = cds

        picture = list(picture_generator(vp, pcm))[0]
        y = np.array(picture["Y"])
        c1 = np.array(picture["C1"])
        c2 = np.array(picture["C2"])

        dd = compute_dimensions_and_depths(vp, pcm)
        assert y.shape == (dd["Y"].height, dd["Y"].width)
        assert c1.shape == (dd["C1"].height, dd["C1"].width)
        assert c2.shape == (dd["C2"].height, dd["C2"].width)

    @pytest.mark.parametrize("width,height", [(1, 1), (1, 10), (10, 1)])
    def test_supports_absurd_video_sizes(self, picture_generator, vp, width, height):
        # Should produce a couple of frames for multi-frame generators
        vp["frame_rate_numer"] = 2
        vp["frame_rate_denom"] = 1

        vp["frame_width"] = width
        vp["frame_height"] = height
        pcm = PictureCodingModes.pictures_are_frames

        # Mustn't crash...
        list(picture_generator(vp, pcm))
def test_one_repeat(self, pictures):
    assert list(repeat_pictures(iter(pictures), 1)) == pictures
def test_zero_repeats(self, pictures):
    assert list(repeat_pictures(iter(pictures), 0)) == []
def signal_range(codec_features):
    """
    **Tests that a decoder has sufficient numerical dynamic range.**

    These test cases contain a series of pictures carrying test patterns
    designed to produce extreme signals within decoders. During these test
    cases, no integer clamping (except for final output clamping) or integer
    overflows must occur.

    A test case is produced for each picture component:

    ``signal_range[Y]``
        Luma component test patterns.

    ``signal_range[C1]``
        Color difference 1 component test patterns.

    ``signal_range[C2]``
        Color difference 2 component test patterns.

    These test cases are produced by encoding pictures consisting of test
    patterns made up entirely of legal (in range) signal values.
    Nevertheless, the resulting bitstreams produce large intermediate values
    within a decoder, though these are not guaranteed to be worst-case.

    .. note::

        For informational purposes, an example of a set of test patterns
        before and after encoding and quantisation is shown below:

        .. image:: /_static/user_guide/signal_range_decoder.svg

    .. note::

        The quantization indices used for lossy codecs are chosen to
        maximise the peak signal range produced by the test patterns. These
        are often higher than a typical VC-2 encoder might pick for a given
        bit rate but are nevertheless valid.

    An informative metadata file is provided alongside each test case which
    gives, for each picture in the bitstream, the parts of a decoder which
    are being tested by the test patterns. See
    :py:class:`vc2_bit_widths.helpers.TestPoint` for details.
    """
    try:
        (
            analysis_luma_pictures,
            synthesis_luma_pictures,
            analysis_color_diff_pictures,
            synthesis_color_diff_pictures,
        ) = get_test_pictures(codec_features)
    except MissingStaticAnalysisError:
        logging.warning(
            (
                "No static analysis available for the wavelet "
                "used by codec '%s'. Signal range test cases cannot "
                "be generated."
            ),
            codec_features["name"],
        )
        return

    for component, analysis_test_pictures, synthesis_test_pictures in [
        ("Y", analysis_luma_pictures, synthesis_luma_pictures),
        ("C1", analysis_color_diff_pictures, synthesis_color_diff_pictures),
        ("C2", analysis_color_diff_pictures, synthesis_color_diff_pictures),
    ]:
        # For lossless codecs we use the analysis test patterns since no
        # quantisation takes place
        if codec_features["lossless"]:
            test_pictures = analysis_test_pictures
        else:
            test_pictures = synthesis_test_pictures

        # Generate enough mid-gray frames to hold all of the test patterns
        one_gray_frame = list(
            mid_gray(
                codec_features["video_parameters"],
                codec_features["picture_coding_mode"],
            )
        )
        pictures = list(
            repeat_pictures(
                one_gray_frame,
                (
                    (len(test_pictures) + len(one_gray_frame) - 1)
                    // len(one_gray_frame)
                ),
            )
        )

        # Fill in the test patterns, recording the quantisation index
        # required for each picture
        minimum_qindices = []
        for test_picture, picture in zip(test_pictures, pictures):
            picture[component] = test_picture.picture.tolist()
            if codec_features["lossless"]:
                minimum_qindices.append(0)
            else:
                minimum_qindices.append(test_picture.quantisation_index)
        while len(minimum_qindices) < len(pictures):
            minimum_qindices.append(0)

        # Extract the test points in JSON-serialisable form
        metadata = [[tp._asdict() for tp in p.test_points] for p in test_pictures]

        # Encode
        sequence = make_sequence(
            codec_features,
            pictures,
            minimum_qindex=minimum_qindices,
        )

        # Check the desired qindex could be used (should only ever fail for
        # absurdly low bitrate configurations)
        num_unexpected_qindices = 0
        expected_qindex = None
        expected_qindex_iter = iter(minimum_qindices)
        for _, sx, sy, slice in iter_slices_in_sequence(codec_features, sequence):
            # The top-left slice marks the start of a new picture
            if sx == 0 and sy == 0:
                expected_qindex = next(expected_qindex_iter, 0)
            if slice["qindex"] != expected_qindex:
                num_unexpected_qindices += 1

        if num_unexpected_qindices > 0:
            logging.warning(
                "Could not assign the required qindex to %d picture slices "
                "for signal range test case due to a small picture_bytes "
                "value. Peak signal levels might be reduced.",
                num_unexpected_qindices,
            )

        yield TestCase(
            Stream(sequences=[sequence]),
            component,
            metadata=metadata,
        )