def test_level_sequence_restrictions_obeyed(
    patch_unconstratined_level_sequence_restrictions,
):
    """
    make_sequence must emit data units in the order demanded by the (patched)
    level-defined sequence restrictions.
    """
    # Use fields so that a single frame yields two pictures
    codec_features = MINIMAL_CODEC_FEATURES.copy()
    codec_features["picture_coding_mode"] = PictureCodingModes.pictures_are_fields

    fields = list(
        mid_gray(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        )
    )
    assert len(fields) == 2

    sequence = make_sequence(codec_features, fields)

    expected_parse_codes = [
        ParseCodes.sequence_header,
        ParseCodes.auxiliary_data,
        ParseCodes.sequence_header,
        ParseCodes.high_quality_picture,
        ParseCodes.sequence_header,
        ParseCodes.high_quality_picture,
        ParseCodes.padding_data,
        ParseCodes.end_of_sequence,
    ]
    actual_parse_codes = [
        data_unit["parse_info"]["parse_code"]
        for data_unit in sequence["data_units"]
    ]
    assert actual_parse_codes == expected_parse_codes

    # Sanity check: the stream still round-trips to the original pictures
    assert serialize_and_decode(sequence) == fields
def picture_numbers(codec_features):
    """
    **Tests picture numbers are correctly read from the bitstream.**

    Each test case contains 8 blank pictures numbered in a particular way.

    ``picture_numbers[start_at_zero]``
        The first picture has picture number 0.

    ``picture_numbers[non_zero_start]``
        The first picture has picture number 1000.

    ``picture_numbers[wrap_around]``
        The first picture has picture number 4294967292, with the picture
        numbers wrapping around to 0 on the 4th picture in the sequence.

    ``picture_numbers[odd_first_picture]``
        The first picture has picture number 7. This test case is only
        included when the picture coding mode is 0 (i.e. pictures are frames)
        since the first field of each frame must have an even number when the
        picture coding mode is 1 (i.e. pictures are fields) (11.5).
    """
    # Build a pool of at least 8 blank pictures (i.e. at least 4 frames)
    base_pictures = list(
        mid_gray(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        )
    )
    base_pictures = list(repeat_pictures(base_pictures, 8 // len(base_pictures)))

    test_cases = [
        ("start_at_zero", list(range(8))),
        ("non_zero_start", list(range(1000, 1008))),
        (
            "wrap_around",
            [4294967292, 4294967293, 4294967294, 4294967295, 0, 1, 2, 3],
        ),
    ]
    # Odd first-field numbers are illegal when pictures are fields (11.5)
    if codec_features["picture_coding_mode"] == PictureCodingModes.pictures_are_frames:
        test_cases.append(("odd_first_picture", list(range(7, 15))))

    for description, numbers in test_cases:
        renumbered_pictures = [
            dict(picture, pic_num=pic_num)
            for picture, pic_num in zip(base_pictures, numbers)
        ]
        sequence = make_sequence(codec_features, renumbered_pictures)
        yield TestCase(Stream(sequences=[sequence]), description)
def test_iter_slices_in_sequence(profile, fragment_slice_count):
    """
    iter_slices_in_sequence must visit every slice, with a correctly
    populated state and in raster-scan order.
    """
    codec_features = MINIMAL_CODEC_FEATURES.copy()
    codec_features["profile"] = profile
    codec_features["fragment_slice_count"] = fragment_slice_count
    codec_features["slices_x"] = 3
    codec_features["slices_y"] = 2
    codec_features["picture_bytes"] = 100

    num_pictures = 2
    sequence = make_sequence(
        codec_features,
        repeat_pictures(
            mid_gray(
                codec_features["video_parameters"],
                codec_features["picture_coding_mode"],
            ),
            num_pictures,
        ),
    )

    found_slices = list(iter_slices_in_sequence(codec_features, sequence))

    # Every slice of every picture must be visited
    slices_per_picture = codec_features["slices_x"] * codec_features["slices_y"]
    assert len(found_slices) == slices_per_picture * num_pictures

    # Each slice must be accompanied by a correctly populated State
    if profile == Profiles.high_quality:
        expected_state = State(
            slice_prefix_bytes=0,
            slice_size_scaler=1,
            slices_x=codec_features["slices_x"],
            slices_y=codec_features["slices_y"],
        )
        for state, _, _, _ in found_slices:
            assert state == expected_state
    elif profile == Profiles.low_delay:
        slice_bytes = Fraction(codec_features["picture_bytes"], slices_per_picture)
        expected_state = State(
            slice_bytes_numerator=slice_bytes.numerator,
            slice_bytes_denominator=slice_bytes.denominator,
            slices_x=codec_features["slices_x"],
            slices_y=codec_features["slices_y"],
        )
        for state, _, _, _ in found_slices:
            assert state == expected_state

    # Slice coordinates must appear in raster-scan order, per picture
    slice_iter = iter(found_slices)
    for _ in range(num_pictures):
        for expected_sy in range(codec_features["slices_y"]):
            for expected_sx in range(codec_features["slices_x"]):
                _, sx, sy, _ = next(slice_iter)
                assert sx == expected_sx
                assert sy == expected_sy
def slice_size_scaler(codec_features):
    """
    **Tests that the 'slice_size_scaler' field is correctly handled.**

    This test case generates a sequence which sets slice_size_scaler value
    (13.5.4) 1 larger than it otherwise would be.

    This test case is only generated for the high quality profile, and levels
    which permit a slice size scaler value greater than 1.
    """
    # Only applicable to the high quality profile
    if codec_features["profile"] != Profiles.high_quality:
        return None

    # Pick a minimum slice size scaler one larger than the value which would
    # otherwise be used
    if codec_features["lossless"]:
        # Mid-gray frames compress to 0 bytes, so slice size scaler = 1 is
        # always sufficient; 2 is therefore "one larger".
        minimum_slice_size_scaler = 2
    else:
        minimum_slice_size_scaler = 1 + get_safe_lossy_hq_slice_size_scaler(
            codec_features["picture_bytes"],
            codec_features["slices_x"] * codec_features["slices_y"],
        )

    # Skip when the level in use pins slice_size_scaler to other values
    permitted_scalers = allowed_values_for(
        LEVEL_CONSTRAINTS,
        "slice_size_scaler",
        codec_features_to_trivial_level_constraints(codec_features),
    )
    if minimum_slice_size_scaler not in permitted_scalers:
        return None

    sequence = make_sequence(
        codec_features,
        mid_gray(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        ),
        minimum_slice_size_scaler=minimum_slice_size_scaler,
    )

    # Lossless slices would otherwise hold zero coefficient bytes; force a
    # non-zero length so the slice_size_scaler actually has to be used.
    if codec_features["lossless"]:
        for _state, _sx, _sy, hq_slice in iter_slices_in_sequence(
            codec_features, sequence
        ):
            assert hq_slice["slice_c2_length"] == 0
            hq_slice["slice_c2_length"] = 1

    return Stream(sequences=[sequence])
def test_picture_types(profile, fragment_slice_count):
    """
    Sequences built for any profile/fragmentation combination must round-trip
    back to the input pictures.
    """
    codec_features = MINIMAL_CODEC_FEATURES.copy()
    codec_features["profile"] = profile
    codec_features["fragment_slice_count"] = fragment_slice_count

    gray_pictures = list(
        mid_gray(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        )
    )
    sequence = make_sequence(codec_features, gray_pictures)

    # Encoding then decoding must reproduce the input pictures exactly
    assert serialize_and_decode(sequence) == gray_pictures
def concatenated_sequences(codec_features):
    """
    **Tests that streams containing multiple concatenated sequences can be
    decoded.**

    A stream consisting of the concatenation of two sequences (10.3) with one
    frame each, the first picture is given picture number zero in both
    sequences.
    """
    first_sequence = make_sequence(
        codec_features,
        mid_gray(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        ),
    )
    # The second sequence is an identical but independent copy of the first
    second_sequence = deepcopy(first_sequence)
    return Stream(sequences=[first_sequence, second_sequence])
def test_length_unchanged_for_non_lossless(
    self, fragment_slice_count, picture_bytes
):
    """
    Adding slice prefix bytes must leave every slice-bearing data unit's
    serialised length unchanged for constant-bit-rate streams.
    """
    codec_features = CodecFeatures(
        MINIMAL_CODEC_FEATURES,
        profile=Profiles.high_quality,
        picture_bytes=picture_bytes,
        fragment_slice_count=fragment_slice_count,
    )

    # Serialise a reference sequence containing no slice prefix bytes and
    # measure the lengths of its slice-containing data units
    reference = BytesIO()
    autofill_and_serialise_stream(
        reference,
        Stream(
            sequences=[
                make_sequence(
                    codec_features,
                    mid_gray(
                        codec_features["video_parameters"],
                        codec_features["picture_coding_mode"],
                    ),
                )
            ]
        ),
    )
    expected_data_unit_lengths = deserialise_and_measure_slice_data_unit_sizes(
        reference.getvalue()
    )
    # Sanity check the deserialise_and_measure_slice_data_unit_sizes
    # function is working...
    assert len(expected_data_unit_lengths) >= 1

    test_cases = list(slice_prefix_bytes(codec_features))
    assert len(test_cases) == 3

    # Each slice_prefix_bytes test case must serialise to the same data unit
    # lengths as the reference
    for test_case in test_cases:
        serialised = BytesIO()
        autofill_and_serialise_stream(serialised, test_case.value)
        data_unit_lengths = deserialise_and_measure_slice_data_unit_sizes(
            serialised.getvalue()
        )
        assert data_unit_lengths == expected_data_unit_lengths
def test_iter_slice_parameters_in_sequence(fragment_slice_count):
    """
    iter_slice_parameters_in_sequence must yield one set of slice parameters
    per picture, fragmented or not.
    """
    codec_features = MINIMAL_CODEC_FEATURES.copy()
    codec_features["profile"] = Profiles.high_quality
    codec_features["fragment_slice_count"] = fragment_slice_count

    num_pictures = 2
    pictures = repeat_pictures(
        mid_gray(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        ),
        num_pictures,
    )
    sequence = make_sequence(codec_features, pictures)

    # One set of slice parameters should be found per picture
    slice_parameters = list(iter_slice_parameters_in_sequence(sequence))
    assert len(slice_parameters) == num_pictures
def static_gray(codec_features):
    """
    **Tests that the decoder can decode a maximally compressible sequence.**

    This sequence contains an image in which every transform coefficient is
    zero. For most color specifications (11.4.10), this decodes to a mid-gray
    frame.

    This special case image is maximally compressible since no transform
    coefficients need to be explicitly coded in the bitstream. For lossless
    coding modes, this will also produce the smallest possible bitstream.
    """
    # NB: fixed a duplicated word ("produce produce") in the docstring above.
    return Stream(
        sequences=[
            make_sequence(
                codec_features,
                mid_gray(
                    codec_features["video_parameters"],
                    codec_features["picture_coding_mode"],
                ),
            )
        ]
    )
def test_mid_gray(primaries, transfer_function):
    """
    mid_gray must output the exact center of each component's code range,
    ignoring the signalled offsets and color model.
    """
    video_parameters = VideoParameters(
        frame_width=10,
        frame_height=5,
        frame_rate_numer=1,
        frame_rate_denom=1,
        pixel_aspect_ratio_numer=1,
        pixel_aspect_ratio_denom=1,
        source_sampling=SourceSamplingModes.progressive,
        top_field_first=True,
        color_diff_format_index=ColorDifferenceSamplingFormats.color_4_4_4,
        # The choice of primaries and transfer function should have no effect:
        # the color should be chosen at the code level, not the color model
        # level
        color_primaries_index=primaries,
        color_matrix_index=PresetColorMatrices.hdtv,
        transfer_function_index=transfer_function,
        # Set two wonky off-center ranges; the offsets should be ignored with
        # the 'mid gray' being the middle of the code range
        luma_offset=20,
        luma_excursion=150,
        color_diff_offset=100,
        color_diff_excursion=900,
    )

    pictures = list(
        mid_gray(video_parameters, PictureCodingModes.pictures_are_frames)
    )
    assert len(pictures) == 1
    picture = pictures[0]

    # Every sample of every component must sit at the middle of its 8/10-bit
    # code range (128 for 8-bit luma, 512 for 10-bit color difference)
    for component, expected_value in [("Y", 128), ("C1", 512), ("C2", 512)]:
        values = np.array(picture[component])
        assert np.array_equal(values, np.full(values.shape, expected_value))
def test_minimum_qindex(kwargs, exp_qis):
    """
    make_sequence must honour minimum-qindex related keyword arguments,
    producing the expected per-picture qindex values.
    """
    codec_features = MINIMAL_CODEC_FEATURES.copy()
    pictures = list(
        repeat_pictures(
            mid_gray(
                codec_features["video_parameters"],
                codec_features["picture_coding_mode"],
            ),
            3,
        )
    )
    assert len(pictures) == 3

    sequence = make_sequence(codec_features, pictures, **kwargs)

    # Collect the qindex of the first HQ slice of every picture, in order
    qis = [
        data_unit["picture_parse"]["wavelet_transform"]["transform_data"][
            "hq_slices"
        ][0]["qindex"]
        for data_unit in sequence["data_units"]
        if "picture_parse" in data_unit
    ]
    assert qis == exp_qis
def padding_data(codec_features):
    """
    **Tests that the contents of padding data units are ignored.**

    This test case consists of a sequence containing two blank frames in which
    every-other data unit is a padding data unit (10.4.5) of various lengths
    and contents (described below).

    ``padding_data[empty]``
        Padding data units containing zero padding bytes (i.e. just consisting
        of a parse info header).

    ``padding_data[zero]``
        Padding data units containing 32 bytes set to 0x00.

    ``padding_data[non_zero]``
        Padding data units containing 32 bytes containing the ASCII encoding
        of the text ``Ignore this padding data please!``.

    ``padding_data[dummy_end_of_sequence]``
        Padding data units containing 32 bytes containing an encoding of an
        end of sequence data unit (10.4.1).

    Where padding data units are not permitted by the VC-2 level in use, these
    test cases are omitted.
    """
    # Generate a base sequence in which we'll modify the padding data units.
    # We ensure there are always at least two pictures in the sequences to
    # make premature termination obvious.
    try:
        base_sequence = make_sequence(
            codec_features,
            repeat_pictures(
                mid_gray(
                    codec_features["video_parameters"],
                    codec_features["picture_coding_mode"],
                ),
                2,
            ),
            # Insert padding data between every data unit
            "sequence_header (padding_data .)* padding_data end_of_sequence $",
        )
    except IncompatibleLevelAndDataUnitError:
        # Padding not allowed in the supplied video format so just skip this
        # test
        return

    cases = [
        ("empty", b""),
        ("zero", b"\x00" * 32),
        ("non_zero", b"Ignore this padding data please!"),
        ("dummy_end_of_sequence", make_dummy_end_of_sequence().ljust(32, b"\x00")),
    ]
    for description, data in cases:
        sequence = replace_padding_data(base_sequence, data)
        yield TestCase(Stream(sequences=[sequence]), description)
def dangling_bounded_block_data(codec_features):
    """
    **Tests that transform values which lie beyond the end of a bounded block
    are read correctly.**

    Picture slices (13.5.3) and (13.5.4) contain transform values in bounded
    blocks (A.4.2). These test cases include bounded blocks in which some
    encoded values lie off the end of the block. Specifically, the following
    cases are tested:

    ``dangling_bounded_block_data[zero_dangling]``
        .. image:: /_static/user_guide/dangling_bounded_block_data_zero_dangling.svg

        A zero value (1 bit) is encoded entirely beyond the end of the bounded
        block.

    ``dangling_bounded_block_data[sign_dangling]``
        .. image:: /_static/user_guide/dangling_bounded_block_data_sign_dangling.svg

        The final bit (the sign bit) of a non-zero exp-golomb value is
        dangling beyond the end of the bounded block.

    ``dangling_bounded_block_data[stop_and_sign_dangling]``
        .. image:: /_static/user_guide/dangling_bounded_block_data_stop_and_sign_dangling.svg

        The final two bits (the stop bit and sign bit) of a non-zero
        exp-golomb value are dangling beyond the end of the bounded block.

    ``dangling_bounded_block_data[lsb_stop_and_sign_dangling]``
        .. image:: /_static/user_guide/dangling_bounded_block_data_lsb_stop_and_sign_dangling.svg

        The final three bits (the least significant bit, stop bit and sign
        bit) of a non-zero exp-golomb value are dangling beyond the end of the
        bounded block.

    .. note::

        The value and magnitudes of the dangling values are chosen depending
        on the wavelet transform in use and might differ from the
        illustrations above.
    """
    # The magnitude of the dangling value is chosen such that even if it ends
    # up being part of the DC component, the bit-shift used by some wavelets
    # won't make it disappear entirely.
    shift = filter_bit_shift(
        State(
            wavelet_index=codec_features["wavelet_index"],
            wavelet_index_ho=codec_features["wavelet_index_ho"],
        )
    )
    magnitude = (
        (codec_features["dwt_depth"] + codec_features["dwt_depth_ho"]) * shift
    ) + 1

    # The picture components expected for each profile
    if codec_features["profile"] == Profiles.high_quality:
        picture_components = ["Y", "C1", "C2"]
    elif codec_features["profile"] == Profiles.low_delay:
        picture_components = ["Y", "C"]

    # Generate single-frame mid-gray sequences
    base_sequence = make_sequence(
        codec_features,
        mid_gray(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        ),
    )

    # Replace with dangling values as required
    for dangle_type in DanglingTransformValueType:
        for component in picture_components:
            try:
                sequence = deepcopy(base_sequence)
                # NB: 'pic_slice' (not 'slice') to avoid shadowing the builtin
                for (state, sx, sy, pic_slice) in iter_slices_in_sequence(
                    codec_features,
                    sequence,
                ):
                    if codec_features["profile"] == Profiles.high_quality:
                        # For lossless coding, extend the slice size to ensure
                        # some data is used
                        if codec_features["lossless"]:
                            min_length = 2
                        else:
                            min_length = 0
                        cut_off_value_at_end_of_hq_slice(
                            state,
                            sx,
                            sy,
                            pic_slice,
                            component,
                            dangle_type,
                            magnitude,
                            min_length,
                        )
                    elif codec_features["profile"] == Profiles.low_delay:
                        cut_off_value_at_end_of_ld_slice(
                            state,
                            sx,
                            sy,
                            pic_slice,
                            component,
                            dangle_type,
                            magnitude,
                        )
                yield TestCase(
                    Stream(sequences=[sequence]),
                    "{}_{}".format(
                        dangle_type.name,
                        component,
                    ),
                )
            except UnsatisfiableBlockSizeError:
                # BUG FIX: the adjacent literals previously concatenated to
                # "...too small to generatedangling_bounded_block_data..."
                # because the first literal lacked a trailing space.
                logging.warning(
                    (
                        "Slices are too small to generate "
                        "dangling_bounded_block_data[%s_%s] test case."
                    ),
                    dangle_type.name,
                    component,
                )
def slice_padding_data(codec_features):
    """
    **Tests that padding bits in picture slices are ignored.**

    Picture slices (13.5.3) and (13.5.4) might contain padding bits beyond
    the end of the transform coefficients for each picture component. These
    test cases check that decoders correctly ignore these values.

    Padding values will be filled with the following:

    ``slice_padding_data[slice_?_all_zeros]``
        Padding bits are all zero.

    ``slice_padding_data[slice_?_all_ones]``
        Padding bits are all one.

    ``slice_padding_data[slice_?_alternating_1s_and_0s]``
        Padding bits alternate between one and zero, starting with one.

    ``slice_padding_data[slice_?_alternating_0s_and_1s]``
        Padding bits alternate between zero and one, starting with zero.

    ``slice_padding_data[slice_?_dummy_end_of_sequence]``
        Padding bits will contain bits which encode an end of sequence data
        unit (10.6).

    The above cases are repeated for the luma and color difference picture
    components as indicated by the value substituted for ``?`` in the test
    case names above. For low-delay pictures these will be ``Y`` (luma) and
    ``C`` (interleaved color difference). For high quality pictures these
    will be ``Y`` (luma), ``C1`` (color difference 1) and ``C2`` (color
    difference 2).
    """
    # The values with which to fill padding data
    #
    # [(filler, byte_align, explanation), ...]
    filler_values = [
        (b"\x00", False, "all_zeros"),
        (b"\xFF", False, "all_ones"),
        (b"\xAA", False, "alternating_1s_and_0s"),
        (b"\x55", False, "alternating_0s_and_1s"),
        # byte_align=True so the embedded parse_info appears byte-aligned
        (make_dummy_end_of_sequence(), True, "dummy_end_of_sequence"),
    ]
    # The picture components expected (differs by profile: HQ slices hold the
    # two color difference components separately, LD interleaves them)
    if codec_features["profile"] == Profiles.high_quality:
        picture_components = ["Y", "C1", "C2"]
    elif codec_features["profile"] == Profiles.low_delay:
        picture_components = ["Y", "C"]
    # Generate single-frame mid-gray sequences with the specified padding data
    base_sequence = make_sequence(
        codec_features,
        # These pictures encode to all zeros which should give the highest
        # possible compression.
        mid_gray(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        ),
    )
    # One test case per (filler pattern, picture component) combination
    for filler, byte_align, explanation in filler_values:
        for component in picture_components:
            # Each case mutates its own copy of the base sequence
            sequence = deepcopy(base_sequence)
            for (state, sx, sy, slice) in iter_slices_in_sequence(
                codec_features,
                sequence,
            ):
                if codec_features["profile"] == Profiles.high_quality:
                    # For lossless coding, extend the slice size to ensure some
                    # padding data is used
                    if codec_features["lossless"]:
                        min_length = (slice["slice_y_length"] +
                                      slice["slice_c1_length"] +
                                      slice["slice_c2_length"] + 8)
                    else:
                        min_length = 0
                    fill_hq_slice_padding(
                        state,
                        sx,
                        sy,
                        slice,
                        component,
                        filler,
                        byte_align,
                        min_length,
                    )
                elif codec_features["profile"] == Profiles.low_delay:
                    fill_ld_slice_padding(
                        state,
                        sx,
                        sy,
                        slice,
                        component,
                        filler,
                        byte_align,
                    )
            yield TestCase(
                Stream(sequences=[sequence]),
                "{}_{}".format(
                    component,
                    explanation,
                ),
            )
def slice_prefix_bytes(codec_features): """ **Tests the decoder can handle a non-zero number of slice prefix bytes.** Produces test cases with a non-zero number of slice prefix bytes containing the following values: ``slice_prefix_bytes[zeros]`` All slice prefix bytes are 0x00. ``slice_prefix_bytes[ones]`` All slice prefix bytes are 0xFF. ``slice_prefix_bytes[end_of_sequence]`` All slice prefix bytes contain bits which encode an end of sequence data unit (10.4). These test cases apply only to the high quality profile and are omitted when the low delay profile is used. """ # This test only applies to high quality codecs if codec_features["profile"] != Profiles.high_quality: return constrained_values = codec_features_to_trivial_level_constraints(codec_features) allowed_slice_prefix_bytes = allowed_values_for( LEVEL_CONSTRAINTS, "slice_prefix_bytes", constrained_values ) mid_gray_pictures = list( mid_gray( codec_features["video_parameters"], codec_features["picture_coding_mode"], ) ) test_cases = [ ("zeros", b"\x00"), ("ones", b"\xFF"), ("end_of_sequence", make_dummy_end_of_sequence()), ] for description, filler in test_cases: sequence = make_sequence(codec_features, mid_gray_pictures) # Determine how many slice prefix bytes we can fit in our slices if codec_features["lossless"]: # Lossless slices can be as large as we like; assign enough slice # bytes for the full set of filler bytes slice_prefix_bytes = len(filler) else: # Find the space assigned for coefficients in the smallest slice in # a fixed-bit-rate stream; we'll replace all slice coefficients # with slice prefix bytes. 
slice_prefix_bytes = min( ( hq_slice["slice_y_length"] + hq_slice["slice_c1_length"] + hq_slice["slice_c2_length"] ) * state["slice_size_scaler"] for state, sx, sy, hq_slice in iter_slices_in_sequence( codec_features, sequence ) ) # Check level constraints allow this slice_prefix_bytes # # NB: This implementation assumes that either the slice_prefix_bytes # field is required to be zero or it is free to be any value. This # assumption is verified for all existing VC-2 levels in the tests for # this module. Should this assumption be violated, more sophisticated # behaviour will be required here... if slice_prefix_bytes not in allowed_slice_prefix_bytes: continue if slice_prefix_bytes < len(filler): logging.warning( ( "Codec '%s' has a very small picture_bytes value " "meaning the slice_prefix_bytes[%s] test case might not " "be as useful as intended." ), codec_features["name"], description, ) # Set the number of slice prefix bytes in all slice parameter headers for slice_parameters in iter_slice_parameters_in_sequence(sequence): assert slice_parameters["slice_prefix_bytes"] == 0 slice_parameters["slice_prefix_bytes"] = slice_prefix_bytes # Add prefix bytes to all slices prefix_bytes = (filler * slice_prefix_bytes)[:slice_prefix_bytes] for state, sx, sy, hq_slice in iter_slices_in_sequence( codec_features, sequence ): hq_slice["prefix_bytes"] = prefix_bytes # Keep overall slice size the same for lossy (constant bit rate) # modes if not codec_features["lossless"]: total_length = ( hq_slice["slice_y_length"] + hq_slice["slice_c1_length"] + hq_slice["slice_c2_length"] ) total_length -= slice_prefix_bytes // state["slice_size_scaler"] hq_slice["slice_y_length"] = 0 hq_slice["slice_c1_length"] = 0 hq_slice["slice_c2_length"] = total_length yield TestCase(Stream(sequences=[sequence]), description)
def lossless_quantization(codec_features):
    """
    **Tests support for quantization in lossless decoders.**

    Quantization can, in principle, be used in lossless coding modes in cases
    where all transform coefficients are divisible by the same factor. This
    test case contains a synthetic test pattern with this property.

    This test case is only generated for lossless codecs.

    .. note::

        For informational purposes, an example decoded test pattern is shown
        below:

        .. image:: /_static/user_guide/lossless_quantization.png

        Note the faint repeating pattern.
    """
    # Don't bother with this test for lossy coding modes (quantization is
    # tested elsewhere)
    if not codec_features["lossless"]:
        return None
    # Pick a non-zero qindex which will ensure all transform coefficients, when
    # set to 1 in the bitstream, will dequantize to different values (when the
    # quant matrix entry is different).
    quant_matrix = get_quantization_marix(codec_features)
    qindex = compute_qindex_with_distinct_quant_factors(quant_matrix)
    # Start with a mid-gray frame (coeffs set to 0). We'll hand-modify this to
    # contain all 1s because a picture which does this may be slightly larger
    # than the unclipped picture size and therefore we can't rely on the
    # encoder to produce such a signal.
    sequence = make_sequence(
        codec_features,
        mid_gray(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        ),
    )
    # Set qindex and all transform coefficients to 1, recomputing each slice's
    # length fields and tracking the largest length encountered
    max_length = 0
    for _state, _sx, _sy, hq_slice in iter_slices_in_sequence(codec_features, sequence):
        hq_slice["qindex"] = qindex
        for c in ["y", "c1", "c2"]:
            hq_slice["{}_transform".format(c)] = [
                1 for _ in hq_slice["{}_transform".format(c)]
            ]
            length = calculate_hq_length_field(hq_slice["{}_transform".format(c)], 1)
            hq_slice["slice_{}_length".format(c)] = length
            max_length = max(length, max_length)
    # Update slice size scaler to keep all length fields to 8 bits or fewer
    slice_size_scaler = max(1, (max_length + 254) // 255)
    for transform_parameters in iter_transform_parameters_in_sequence(
        codec_features, sequence
    ):
        transform_parameters["slice_parameters"][
            "slice_size_scaler"
        ] = slice_size_scaler
    # Rescale each slice's length fields (rounding up) to match the new scaler
    for _state, _sx, _sy, hq_slice in iter_slices_in_sequence(codec_features, sequence):
        for c in ["y", "c1", "c2"]:
            hq_slice["slice_{}_length".format(c)] += slice_size_scaler - 1
            hq_slice["slice_{}_length".format(c)] //= slice_size_scaler
    # If the resulting picture clips, give up on this test case. We make the
    # assumption that while a clever lossless encoder may use quantization it
    # is unlikely to rely on signal clipping in the decoder. As a consequence,
    # to avoid producing a test case which a decoder might reasonably fail to
    # decode due to internal signal width limitations, we bail.
    #
    # In practice, even for the largest VC-2 filters, transform depths and
    # wonkiest quantisation matrices, the generated signals should fit (very)
    # comfortably into 8 bit video signal ranges. As such, if this check fails
    # it is very likely a highly degenerate codec configuration has been
    # specified.
    if check_for_signal_clipping(sequence):
        logging.warning(
            "The lossless_quantization test case generator could not produce a "
            "losslessly compressible image and has been omitted. This probably "
            "means an (impractically) high transform depth or custom quantisation "
            "matrix entry or an extremely low picture bit depth was used."
        )
        return None
    return Stream(sequences=[sequence])
def signal_range(codec_features):
    """
    **Tests that an encoder has sufficient numerical dynamic range.**

    These test cases contain test patterns designed to produce extreme signals
    within encoders. During these test cases, no integer clamping or overflows
    must occur.

    A test case is produced for each picture component:

    ``signal_range[Y]``
        Luma component test patterns.

    ``signal_range[C1]``
        Color difference 1 component test patterns.

    ``signal_range[C2]``
        Color difference 2 component test patterns.

    Though the test patterns produce near worst case signal levels, they are
    not guaranteed to produce the largest values possible.

    .. note::

        For informational purposes, an example of a set of test patterns are
        shown below:

        .. image:: /_static/user_guide/signal_range_encoder.png

    An informative metadata file is provided along side each test case which
    gives, for each picture in the bitstream, the parts of a encoder which are
    being tested by the test patterns. See
    :py:class:`vc2_bit_widths.helpers.TestPoint` for details.
    """
    # Only the analysis (encoder-side) test pictures are used here; the
    # synthesis pictures are for the decoder-side variant of this test.
    try:
        (
            analysis_luma_pictures,
            synthesis_luma_pictures,
            analysis_color_diff_pictures,
            synthesis_color_diff_pictures,
        ) = get_test_pictures(codec_features)
    except MissingStaticAnalysisError:
        logging.warning(
            (
                "No static analysis available for the wavelet "
                "used by codec '%s'. Signal range test cases cannot "
                "be generated."
            ),
            codec_features["name"],
        )
        return
    for component, analysis_test_pictures in [
        ("Y", analysis_luma_pictures),
        ("C1", analysis_color_diff_pictures),
        ("C2", analysis_color_diff_pictures),
    ]:
        # Generate an initially empty set of mid-gray pictures
        one_gray_frame = list(
            mid_gray(
                codec_features["video_parameters"],
                codec_features["picture_coding_mode"],
            )
        )
        # Repeat enough gray frames (rounding up) to hold every test picture
        pictures = list(
            repeat_pictures(
                one_gray_frame,
                (
                    (len(analysis_test_pictures) + len(one_gray_frame) - 1)
                    // len(one_gray_frame)
                ),
            )
        )
        # Fill-in the test patterns
        for test_picture, picture in zip(analysis_test_pictures, pictures):
            picture[component] = test_picture.picture.tolist()
        # Extract the testpoints in JSON-serialisable form
        metadata = [
            [tp._asdict() for tp in p.test_points] for p in analysis_test_pictures
        ]
        out = EncoderTestSequence(
            pictures=pictures,
            video_parameters=codec_features["video_parameters"],
            picture_coding_mode=codec_features["picture_coding_mode"],
        )
        yield TestCase(out, component, metadata=metadata)
def signal_range(codec_features):
    """
    **Tests that a decoder has sufficient numerical dynamic range.**

    These test cases contain a series of pictures containing test patterns
    designed to produce extreme signals within decoders. During these test
    cases, no integer clamping (except for final output clamping) or integer
    overflows must occur.

    A test case is produced for each picture component:

    ``signal_range[Y]``
        Luma component test patterns.

    ``signal_range[C1]``
        Color difference 1 component test patterns.

    ``signal_range[C2]``
        Color difference 2 component test patterns.

    These test cases are produced by encoding pictures consisting test
    patterns made up of entirely of legal (in range) signal values.
    Nevertheless, the resulting bitstreams produce large intermediate values
    within a decoder, though these are not guaranteed to be worst-case.

    .. note::

        For informational purposes, an example of a set of test patterns
        before and after encoding and quantisation is shown below:

        .. image:: /_static/user_guide/signal_range_decoder.svg

    .. note::

        The quantization indices used for lossy codecs are chosen to maximise
        the peak signal range produced by the test patterns. These are often
        higher than a typical VC-2 encoder might pick for a given bit rate but
        are nevertheless valid.

    An informative metadata file is provided along side each test case which
    gives, for each picture in the bitstream, the parts of a decoder which are
    being tested by the test patterns. See
    :py:class:`vc2_bit_widths.helpers.TestPoint` for details.
    """
    try:
        (
            analysis_luma_pictures,
            synthesis_luma_pictures,
            analysis_color_diff_pictures,
            synthesis_color_diff_pictures,
        ) = get_test_pictures(codec_features)
    except MissingStaticAnalysisError:
        logging.warning(
            (
                "No static analysis available for the wavelet "
                "used by codec '%s'. Signal range test cases cannot "
                "be generated."
            ),
            codec_features["name"],
        )
        return
    for component, analysis_test_pictures, synthesis_test_pictures in [
        ("Y", analysis_luma_pictures, synthesis_luma_pictures),
        ("C1", analysis_color_diff_pictures, synthesis_color_diff_pictures),
        ("C2", analysis_color_diff_pictures, synthesis_color_diff_pictures),
    ]:
        # For lossless codecs we use the analysis test patterns since no
        # quantisation takes place
        if codec_features["lossless"]:
            test_pictures = analysis_test_pictures
        else:
            test_pictures = synthesis_test_pictures
        # Generate an initially empty set of mid-gray pictures
        one_gray_frame = list(
            mid_gray(
                codec_features["video_parameters"],
                codec_features["picture_coding_mode"],
            )
        )
        # Repeat enough gray frames (rounding up) to hold every test picture
        pictures = list(
            repeat_pictures(
                one_gray_frame,
                (
                    (len(test_pictures) + len(one_gray_frame) - 1)
                    // len(one_gray_frame)
                ),
            )
        )
        # Fill-in the test patterns, recording the qindex each pattern was
        # designed for (0 = no minimum, used for lossless and filler frames)
        minimum_qindices = []
        for test_picture, picture in zip(test_pictures, pictures):
            picture[component] = test_picture.picture.tolist()
            if codec_features["lossless"]:
                minimum_qindices.append(0)
            else:
                minimum_qindices.append(test_picture.quantisation_index)
        # Pad with zeros for any trailing gray filler pictures
        while len(minimum_qindices) < len(pictures):
            minimum_qindices.append(0)
        # Extract the testpoints in JSON-serialisable form
        metadata = [[tp._asdict() for tp in p.test_points] for p in test_pictures]
        # Encode
        sequence = make_sequence(
            codec_features,
            pictures,
            minimum_qindex=minimum_qindices,
        )
        # Check the desired qindex could be used (should only ever fail for
        # absurdly low bitrate configurations).
        num_unexpected_qindices = 0
        expected_qindex = None
        expected_qindex_iter = iter(minimum_qindices)
        for _, sx, sy, slice in iter_slices_in_sequence(codec_features, sequence):
            # (sx, sy) == (0, 0) marks the first slice of each picture
            if sx == 0 and sy == 0:
                expected_qindex = next(expected_qindex_iter, 0)
            if slice["qindex"] != expected_qindex:
                num_unexpected_qindices += 1
        if num_unexpected_qindices > 0:
            logging.warning(
                "Could not assign the required qindex to %d picture slices "
                "for signal range test case due to a small picture_bytes value. "
                "Peak signal levels might be reduced.",
                num_unexpected_qindices,
            )
        yield TestCase(
            Stream(sequences=[sequence]),
            component,
            metadata=metadata,
        )
class TestGenericPictureGeneratorBehaviour(object):
    """
    Properties which every picture generator in this module must satisfy:
    correct container types, whole numbers of frames, correct component
    dimensions and tolerance of degenerate video sizes.
    """

    @pytest.fixture(params=[
        real_pictures,
        moving_sprite,
        static_sprite,
        mid_gray,
        white_noise,
        linear_ramps,
        # Also check that repeating a generator's output preserves the
        # generic invariants
        lambda *a, **kw: repeat_pictures(mid_gray(*a, **kw), 2),
    ])
    def picture_generator(
        self,
        request,
        replace_real_pictures_with_test_pictures,  # noqa: F811
    ):
        # Parametrized over every picture generator under test
        return request.param

    @pytest.fixture
    def vp(self):
        # A small, fully specified set of video parameters used by all tests
        return VideoParameters(
            frame_width=16,
            frame_height=8,
            frame_rate_numer=1,
            frame_rate_denom=1,
            pixel_aspect_ratio_numer=1,
            pixel_aspect_ratio_denom=1,
            source_sampling=SourceSamplingModes.progressive,
            top_field_first=True,
            color_diff_format_index=ColorDifferenceSamplingFormats.color_4_4_4,
            color_primaries_index=PresetColorPrimaries.hdtv,
            color_matrix_index=PresetColorMatrices.hdtv,
            transfer_function_index=PresetTransferFunctions.tv_gamma,
            luma_offset=0,
            luma_excursion=255,
            color_diff_offset=128,
            color_diff_excursion=255,
        )

    @pytest.mark.parametrize("ssm", SourceSamplingModes)
    @pytest.mark.parametrize("pcm", PictureCodingModes)
    def test_produces_correct_dict_type(self, picture_generator, vp, ssm, pcm):
        # Components must be nested lists of plain (possibly long) ints
        vp["source_sampling"] = ssm
        int_types = (int, )
        if sys.version_info < (3, 0, 0):
            # Python 2 may produce 'long' values for large sample ranges
            int_types += (long, )  # noqa: F821
        for picture in list(picture_generator(vp, pcm)):
            assert isinstance(picture["Y"], list)
            assert isinstance(picture["Y"][0], list)
            assert type(picture["Y"][0][0]) in int_types
            assert isinstance(picture["C1"], list)
            assert isinstance(picture["C1"][0], list)
            assert type(picture["C1"][0][0]) in int_types
            assert isinstance(picture["C2"], list)
            assert isinstance(picture["C2"][0], list)
            assert type(picture["C2"][0][0]) in int_types
            assert isinstance(picture["pic_num"], int)

    @pytest.mark.parametrize("ssm", SourceSamplingModes)
    @pytest.mark.parametrize("pcm", PictureCodingModes)
    def test_produces_whole_number_of_frames(self, picture_generator, vp, ssm, pcm):
        vp["source_sampling"] = ssm
        pictures = list(picture_generator(vp, pcm))
        if pcm == PictureCodingModes.pictures_are_frames:
            assert len(pictures) >= 1
        else:
            # Must have even number of frames
            assert len(pictures) >= 2
            assert (len(pictures) % 2) == 0
            # Even/odd fields must have even/odd picture numbers
            for i, picture in enumerate(pictures):
                assert (picture["pic_num"] % 2) == (i % 2)
        # Picture numbers must be consecutive
        last_num = pictures[0]["pic_num"]
        for picture in pictures[1:]:
            assert picture["pic_num"] == last_num + 1
            last_num += 1

    @pytest.mark.parametrize("ssm", SourceSamplingModes)
    @pytest.mark.parametrize("pcm", PictureCodingModes)
    @pytest.mark.parametrize("cds", ColorDifferenceSamplingFormats)
    def test_produces_correct_picture_sizes(self, picture_generator, vp, ssm, pcm, cds):
        # Component arrays must match the dimensions computed from the video
        # parameters and picture coding mode
        vp["source_sampling"] = ssm
        vp["color_diff_format_index"] = cds
        picture = list(picture_generator(vp, pcm))[0]
        y = np.array(picture["Y"])
        c1 = np.array(picture["C1"])
        c2 = np.array(picture["C2"])
        dd = compute_dimensions_and_depths(vp, pcm)
        assert y.shape == (dd["Y"].height, dd["Y"].width)
        assert c1.shape == (dd["C1"].height, dd["C1"].width)
        assert c2.shape == (dd["C2"].height, dd["C2"].width)

    @pytest.mark.parametrize("width,height", [(1, 1), (1, 10), (10, 1)])
    def test_supports_absurd_video_sizes(self, picture_generator, vp, width, height):
        # Should produce a couple of frames for multi-frame generators
        vp["frame_rate_numer"] = 2
        vp["frame_rate_denom"] = 1
        vp["frame_width"] = width
        vp["frame_height"] = height
        pcm = PictureCodingModes.pictures_are_frames
        # Mustn't crash...
        list(picture_generator(vp, pcm))