Esempio n. 1
0
def real_pictures(codec_features):
    """
    **Tests real pictures are decoded correctly.**

    A series of three still photographs.

    .. image:: /_static/user_guide/real_pictures.svg

    .. note::

        The images encoded in this sequence are generated from 4256 by 2832
        pixel, 4:4:4, 16 bit, standard dynamic range, RGB color images with the
        ITU-R BT.709 gamut. As such, the decoded pictures might be of reduced
        technical quality compared with the capabilities of the format. The
        rescaling, color conversion and encoding algorithms used are also basic
        in nature, potentially further reducing the picture quality.
    """
    # Generate the photographs for the configured video format and coding mode
    pictures = picture_generators.real_pictures(
        codec_features["video_parameters"],
        codec_features["picture_coding_mode"],
    )
    # Wrap the single encoded sequence into a stream
    return Stream(sequences=[make_sequence(codec_features, pictures)])
Esempio n. 2
0
def test_iter_transform_parameters_in_sequence(profile, fragment_slice_count):
    # Minimal codec configuration with a known 3x2 slice layout
    codec_features = MINIMAL_CODEC_FEATURES.copy()
    codec_features["profile"] = profile
    codec_features["fragment_slice_count"] = fragment_slice_count
    codec_features["slices_x"] = 3
    codec_features["slices_y"] = 2
    codec_features["picture_bytes"] = 100

    num_pictures = 2

    pictures = repeat_pictures(
        mid_gray(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        ),
        num_pictures,
    )
    sequence = make_sequence(codec_features, pictures)

    transform_parameters = list(
        iter_transform_parameters_in_sequence(codec_features, sequence)
    )

    # One set of transform parameters per picture should have been found
    assert len(transform_parameters) == num_pictures
Esempio n. 3
0
def repeated_sequence_headers(codec_features):
    """
    **Tests the decoder can handle a stream with repeated sequence headers.**

    This test case consists of a sequence containing two frames in which the
    sequence header is repeated before every picture.

    This test will be omitted if the VC-2 level prohibits the repetition of the
    sequence header.
    """
    # Pattern which forces an extra sequence header between every data unit
    data_unit_pattern = "(sequence_header .)+"

    try:
        # Build a base sequence whose sequence headers will later be replaced.
        # Two frames guarantee that pictures and sequence headers end up
        # interleaved.
        sequence = make_sequence(
            codec_features,
            repeat_pictures(
                static_sprite(
                    codec_features["video_parameters"],
                    codec_features["picture_coding_mode"],
                ),
                2,
            ),
            data_unit_pattern,
        )
    except IncompatibleLevelAndDataUnitError:
        # The current VC-2 level does not allow this degree of sequence header
        # interleaving; omit the test case.
        return None

    return Stream(sequences=[sequence])
Esempio n. 4
0
def absent_next_parse_offset(codec_features):
    """
    **Tests handling of missing 'next parse offset' field.**

    The 'next parse offset' field of the ``parse_info`` header (see (10.5.1))
    can be set to zero (i.e. omitted) for pictures. This test case verifies
    that decoders are still able to decode streams with this field absent.
    """
    sequence = make_sequence(
        codec_features,
        repeat_pictures(
            mid_gray(
                codec_features["video_parameters"],
                codec_features["picture_coding_mode"],
            ),
            2,
        ),
    )

    # Parse codes identifying picture-containing data units
    picture_parse_codes = (
        ParseCodes.low_delay_picture,
        ParseCodes.high_quality_picture,
        ParseCodes.low_delay_picture_fragment,
        ParseCodes.high_quality_picture_fragment,
    )

    # Zero next_parse_offset for every picture data unit, preventing the
    # default auto numbering during serialisation
    for data_unit in sequence["data_units"]:
        parse_info = data_unit["parse_info"]
        if parse_info["parse_code"] in picture_parse_codes:
            parse_info["next_parse_offset"] = 0

    return Stream(sequences=[sequence])
Esempio n. 5
0
def picture_numbers(codec_features):
    """
    **Tests picture numbers are correctly read from the bitstream.**

    Each test case contains 8 blank pictures numbered in a particular way.

    ``picture_numbers[start_at_zero]``
        The first picture has picture number 0.

    ``picture_numbers[non_zero_start]``
        The first picture has picture number 1000.

    ``picture_numbers[wrap_around]``
        The first picture has picture number 4294967292, with the picture
        numbers wrapping around to 0 on the 4th picture in the sequence.

    ``picture_numbers[odd_first_picture]``
        The first picture has picture number 7. This test case is only included
        when the picture coding mode is 0 (i.e. pictures are frames) since the
        first field of each frame must have an even number when the picture
        coding mode is 1 (i.e. pictures are fields) (11.5).
    """
    # Build at least 8 mid-gray pictures (i.e. at least 4 frames)
    base_pictures = list(
        mid_gray(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        )
    )
    base_pictures = list(
        repeat_pictures(
            base_pictures,
            8 // len(base_pictures),
        )
    )

    numbering_schemes = [
        ("start_at_zero", list(range(8))),
        ("non_zero_start", list(range(1000, 1008))),
        (
            "wrap_around",
            [4294967292, 4294967293, 4294967294, 4294967295, 0, 1, 2, 3],
        ),
    ]

    # Odd first picture numbers are only legal when pictures are frames (11.5)
    pictures_are_frames = (
        codec_features["picture_coding_mode"] == PictureCodingModes.pictures_are_frames
    )
    if pictures_are_frames:
        numbering_schemes.append(("odd_first_picture", list(range(7, 15))))

    for description, numbers in numbering_schemes:
        numbered_pictures = [
            dict(picture, pic_num=number)
            for picture, number in zip(base_pictures, numbers)
        ]
        yield TestCase(
            Stream(sequences=[make_sequence(codec_features, numbered_pictures)]),
            description,
        )
Esempio n. 6
0
def test_iter_slices_in_sequence(profile, fragment_slice_count):
    # Minimal codec configuration with a known 3x2 slice layout
    codec_features = MINIMAL_CODEC_FEATURES.copy()
    codec_features["profile"] = profile
    codec_features["fragment_slice_count"] = fragment_slice_count
    codec_features["slices_x"] = 3
    codec_features["slices_y"] = 2
    codec_features["picture_bytes"] = 100

    num_pictures = 2
    slices_x = codec_features["slices_x"]
    slices_y = codec_features["slices_y"]

    sequence = make_sequence(
        codec_features,
        repeat_pictures(
            mid_gray(
                codec_features["video_parameters"],
                codec_features["picture_coding_mode"],
            ),
            num_pictures,
        ),
    )

    slices = list(iter_slices_in_sequence(codec_features, sequence))

    # Every slice of every picture should have been visited
    assert len(slices) == slices_x * slices_y * num_pictures

    # Each slice should be accompanied by a correctly populated State
    if profile == Profiles.high_quality:
        expected_state = State(
            slice_prefix_bytes=0,
            slice_size_scaler=1,
            slices_x=slices_x,
            slices_y=slices_y,
        )
        for state, _, _, _ in slices:
            assert state == expected_state
    elif profile == Profiles.low_delay:
        slice_bytes = Fraction(
            codec_features["picture_bytes"],
            slices_x * slices_y,
        )
        expected_state = State(
            slice_bytes_numerator=slice_bytes.numerator,
            slice_bytes_denominator=slice_bytes.denominator,
            slices_x=slices_x,
            slices_y=slices_y,
        )
        for state, _, _, _ in slices:
            assert state == expected_state

    # Slices should be visited in raster-scan order within each picture
    expected_coordinates = [
        (exp_sx, exp_sy)
        for _ in range(num_pictures)
        for exp_sy in range(slices_y)
        for exp_sx in range(slices_x)
    ]
    assert [(sx, sy) for _, sx, sy, _ in slices] == expected_coordinates
Esempio n. 7
0
def slice_size_scaler(codec_features):
    """
    **Tests that the 'slice_size_scaler' field is correctly handled.**

    This test case generates a sequence which sets slice_size_scaler value
    (13.5.4) 1 larger than it otherwise would be.

    This test case is only generated for the high quality profile, and levels
    which permit a slice size scaler value greater than 1.
    """
    # Only applicable to the high quality profile
    if codec_features["profile"] != Profiles.high_quality:
        return None

    num_slices = codec_features["slices_x"] * codec_features["slices_y"]

    # Choose a minimum slice size scaler one larger than would otherwise be
    # required
    if codec_features["lossless"]:
        # Mid-gray frames compress to 0 bytes, so a slice size scaler of 1
        # would always be sufficient; 2 is therefore "one larger".
        minimum_slice_size_scaler = 2
    else:
        minimum_slice_size_scaler = 1 + get_safe_lossy_hq_slice_size_scaler(
            codec_features["picture_bytes"],
            num_slices,
        )

    # Omit the test case if the level forbids a non-1 slice size scaler
    permitted_values = allowed_values_for(
        LEVEL_CONSTRAINTS,
        "slice_size_scaler",
        codec_features_to_trivial_level_constraints(codec_features),
    )
    if minimum_slice_size_scaler not in permitted_values:
        return None

    sequence = make_sequence(
        codec_features,
        mid_gray(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        ),
        minimum_slice_size_scaler=minimum_slice_size_scaler,
    )

    # For lossless modes, give each slice a non-zero coefficient byte count so
    # that the slice_size_scaler actually has to be used.
    if codec_features["lossless"]:
        for _state, _sx, _sy, hq_slice in iter_slices_in_sequence(
            codec_features, sequence
        ):
            assert hq_slice["slice_c2_length"] == 0
            hq_slice["slice_c2_length"] = 1

    return Stream(sequences=[sequence])
Esempio n. 8
0
def static_noise(codec_features):
    """
    **Tests that decoder correctly decodes a noise plate.**

    A static frame containing pseudo-random uniform noise as illustrated below:

    .. image:: /_static/user_guide/noise.png
    """
    # Encode a single noise frame into a one-sequence stream
    pictures = white_noise(
        codec_features["video_parameters"],
        codec_features["picture_coding_mode"],
    )
    return Stream(sequences=[make_sequence(codec_features, pictures)])
def concatenated_sequences(codec_features):
    """
    **Tests that streams containing multiple concatenated sequences can be
    decoded.**

    A stream consisting of the concatenation of two sequences (10.3) with one
    frame each, the first picture is given picture number zero in both
    sequences.
    """
    # A single mid-gray frame, encoded once...
    sequence = make_sequence(
        codec_features,
        mid_gray(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        ),
    )
    # ...then duplicated so both sequences are identical (including picture
    # numbering starting at zero)
    return Stream(sequences=[sequence, deepcopy(sequence)])
Esempio n. 10
0
    def test_length_unchanged_for_non_lossless(
        self, fragment_slice_count, picture_bytes
    ):
        codec_features = CodecFeatures(
            MINIMAL_CODEC_FEATURES,
            profile=Profiles.high_quality,
            picture_bytes=picture_bytes,
            fragment_slice_count=fragment_slice_count,
        )

        # Serialise a reference stream containing no prefix bytes and measure
        # each slice-containing data unit
        reference = BytesIO()
        autofill_and_serialise_stream(
            reference,
            Stream(
                sequences=[
                    make_sequence(
                        codec_features,
                        mid_gray(
                            codec_features["video_parameters"],
                            codec_features["picture_coding_mode"],
                        ),
                    )
                ]
            ),
        )
        expected_data_unit_lengths = deserialise_and_measure_slice_data_unit_sizes(
            reference.getvalue()
        )
        # Sanity check: the measurement helper found at least one data unit
        assert len(expected_data_unit_lengths) >= 1

        test_cases = list(slice_prefix_bytes(codec_features))
        assert len(test_cases) == 3

        # Each generated test case must serialise to identical data unit sizes
        for test_case in test_cases:
            buffer = BytesIO()
            autofill_and_serialise_stream(buffer, test_case.value)
            data_unit_lengths = deserialise_and_measure_slice_data_unit_sizes(
                buffer.getvalue()
            )
            assert data_unit_lengths == expected_data_unit_lengths
Esempio n. 11
0
def test_iter_slice_parameters_in_sequence(fragment_slice_count):
    # Minimal high quality codec configuration
    codec_features = MINIMAL_CODEC_FEATURES.copy()
    codec_features["profile"] = Profiles.high_quality
    codec_features["fragment_slice_count"] = fragment_slice_count

    num_pictures = 2

    pictures = repeat_pictures(
        mid_gray(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        ),
        num_pictures,
    )
    sequence = make_sequence(codec_features, pictures)

    slice_parameters = list(iter_slice_parameters_in_sequence(sequence))

    # One set of slice parameters per picture should have been found
    assert len(slice_parameters) == num_pictures
Esempio n. 12
0
def static_gray(codec_features):
    """
    **Tests that the decoder can decode a maximally compressible sequence.**

    This sequence contains an image in which every transform coefficient is
    zero. For most color specifications (11.4.10), this decodes to a mid-gray
    frame.

    This special case image is maximally compressible since no transform
    coefficients need to be explicitly coded in the bitstream. For lossless
    coding modes, this will also produce the smallest possible bitstream.
    """
    # All-zero transform coefficients decode to mid-gray for most color specs
    pictures = mid_gray(
        codec_features["video_parameters"],
        codec_features["picture_coding_mode"],
    )
    return Stream(sequences=[make_sequence(codec_features, pictures)])
Esempio n. 13
0
def static_ramps(codec_features):
    """
    **Tests that decoder correctly reports color encoding information.**

    This test requires that the decoded pictures are observed using the
    intended display equipment for the decoder to ensure that the relevant
    color coding metadata is passed on.

    A static frame containing linear signal ramps for white and primary
    red, green and blue (in that order, from top-to-bottom) as illustrated
    below:

    .. image:: /_static/user_guide/static_ramps.png

    The color bands must be in the correct order (white, red, green, blue from
    top to bottom). If not, the color components might have been ordered
    incorrectly.

    The red, green and blue colors should correspond to the red, green and blue
    primaries for the color specification (11.4.10.2).

    .. note::

        When D-Cinema primaries are specified (preset color primaries index 3),
        red, green and blue are replaced with CIE X, Y and Z respectively. Note
        that these might not represent physically realisable colors.

    The left-most pixels in each band are notionally video black and the
    right-most pixels video white, red, green and blue (respectively). That is,
    oversaturated signals (e.g. 'super-blacks' and 'super-white') are not
    included.

    .. note::

        For lossy codecs, the decoded signal values might vary due to coding
        artefacts.

    The value ramps in the test picture are linear, meaning that the (linear)
    pixel values increase at a constant rate from left (black) to right
    (saturated white/red/green/blue). Due to the non-linear response of human
    vision, this will produce a non-linear brightness ramp which appears to
    quickly saturate. Further, when a non-linear transfer function is specified
    (11.4.10.4) the raw decoded picture values will not be linearly spaced.

    .. note::

        When the D-Cinema transfer function is specified (preset transfer
        function index 3), the saturated signals do not correspond to a
        non-linear signal value of 1.0 but instead approximately 0.97. This is
        because the D-Cinema transfer function allocates part of its nominal
        output range to over-saturated signals.
    """
    # Encode a single linear-ramps frame into a one-sequence stream
    pictures = linear_ramps(
        codec_features["video_parameters"],
        codec_features["picture_coding_mode"],
    )
    return Stream(sequences=[make_sequence(codec_features, pictures)])
Esempio n. 14
0
def interlace_mode_and_pixel_aspect_ratio(codec_features):
    """
    **Tests that the interlacing mode and pixel aspect ratio is correctly
    decoded.**

    These tests require that the decoded pictures are observed using the
    intended display equipment for the decoder to ensure that the relevant
    display metadata is passed on.

    ``interlace_mode_and_pixel_aspect_ratio[static_sequence]``
        A single frame containing a stationary graphic at the top-left corner
        on a black background, as illustrated below.

        .. image:: /_static/user_guide/interlace_mode_and_pixel_aspect_ratio_static_sequence.svg

        If the field ordering (i.e. top field first flag, see (7.3) and (11.3))
        has been decoded correctly, the edges should be smooth. If the field
        order has been reversed the edges will appear jagged.

        If the pixel aspect ratio (see (11.4.7)) has been correctly decoded,
        the white triangle should be as wide as it is tall and the 'hole'
        should be circular.

    ``interlace_mode_and_pixel_aspect_ratio[moving_sequence]``
        A sequence of 10 frames containing a graphic moving from left to right
        along the top of the frame. In each successive frame, the graphic moves
        16 luma samples to the right (i.e. 8 samples every field, for
        interlaced formats).

        .. image:: /_static/user_guide/interlace_mode_and_pixel_aspect_ratio_moving_sequence.svg

        For progressive formats, the graphic should appear with smooth edges in
        each frame.

        For interlaced formats, the graphic should move smoothly when displayed
        on an interlaced monitor. If displayed as progressive frames (as in the
        illustration above), the pictures will appear to have ragged edges.
    """
    # One test case per sprite generator: a stationary and a moving graphic
    sprite_test_cases = [
        (static_sprite, "static_sequence"),
        (moving_sprite, "moving_sequence"),
    ]
    for picture_generator, description in sprite_test_cases:
        sequence = make_sequence(
            codec_features,
            picture_generator(
                codec_features["video_parameters"],
                codec_features["picture_coding_mode"],
            ),
        )
        yield TestCase(Stream(sequences=[sequence]), description)
Esempio n. 15
0
def dangling_bounded_block_data(codec_features):
    """
    **Tests that transform values which lie beyond the end of a bounded block
    are read correctly.**

    Picture slices (13.5.3) and (13.5.4) contain transform values in bounded
    blocks (A.4.2). These test cases include bounded blocks in which some
    encoded values lie off the end of the block. Specifically, the following
    cases are tested:

    ``dangling_bounded_block_data[zero_dangling]``
        .. image:: /_static/user_guide/dangling_bounded_block_data_zero_dangling.svg

        A zero value (1 bit) is encoded entirely beyond the end of the bounded
        block.

    ``dangling_bounded_block_data[sign_dangling]``
        .. image:: /_static/user_guide/dangling_bounded_block_data_sign_dangling.svg

        The final bit (the sign bit) of a non-zero exp-golomb value is dangling
        beyond the end of the bounded block.

    ``dangling_bounded_block_data[stop_and_sign_dangling]``
        .. image:: /_static/user_guide/dangling_bounded_block_data_stop_and_sign_dangling.svg

        The final two bits (the stop bit and sign bit) of a non-zero exp-golomb
        value are dangling beyond the end of the bounded block.

    ``dangling_bounded_block_data[lsb_stop_and_sign_dangling]``
        .. image:: /_static/user_guide/dangling_bounded_block_data_lsb_stop_and_sign_dangling.svg

        The final three bits (the least significant bit, stop bit and sign bit)
        of a non-zero exp-golomb value are dangling beyond the end of the
        bounded block.

    .. note::

        The value and magnitudes of the dangling values are chosen depending on
        the wavelet transform in use and might differ from the illustrations
        above.
    """
    # The magnitude of the dangling value is chosen such that even if it ends
    # up being part of the DC component, the bit-shift used by some wavelets
    # won't make it disappear entirely.
    shift = filter_bit_shift(
        State(
            wavelet_index=codec_features["wavelet_index"],
            wavelet_index_ho=codec_features["wavelet_index_ho"],
        ))
    magnitude = (
        (codec_features["dwt_depth"] + codec_features["dwt_depth_ho"]) *
        shift) + 1

    # The picture components expected for each profile (HQ codes the two color
    # difference components separately; LD interleaves them)
    if codec_features["profile"] == Profiles.high_quality:
        picture_components = ["Y", "C1", "C2"]
    elif codec_features["profile"] == Profiles.low_delay:
        picture_components = ["Y", "C"]

    # Generate single-frame mid-gray sequences (all-zero coefficients) as the
    # base to be modified below
    base_sequence = make_sequence(
        codec_features,
        mid_gray(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        ),
    )

    # Produce one test case per (dangle type, picture component) pair by
    # rewriting the end of every slice in a copy of the base sequence
    for dangle_type in DanglingTransformValueType:
        for component in picture_components:
            try:
                sequence = deepcopy(base_sequence)
                # NOTE: renamed from 'slice' to avoid shadowing the builtin
                for (state, sx, sy, pic_slice) in iter_slices_in_sequence(
                        codec_features,
                        sequence,
                ):
                    if codec_features["profile"] == Profiles.high_quality:
                        # For lossless coding, extend the slice size to ensure some
                        # data is used
                        if codec_features["lossless"]:
                            min_length = 2
                        else:
                            min_length = 0

                        cut_off_value_at_end_of_hq_slice(
                            state,
                            sx,
                            sy,
                            pic_slice,
                            component,
                            dangle_type,
                            magnitude,
                            min_length,
                        )
                    elif codec_features["profile"] == Profiles.low_delay:
                        cut_off_value_at_end_of_ld_slice(
                            state,
                            sx,
                            sy,
                            pic_slice,
                            component,
                            dangle_type,
                            magnitude,
                        )

                yield TestCase(
                    Stream(sequences=[sequence]),
                    "{}_{}".format(
                        dangle_type.name,
                        component,
                    ),
                )
            except UnsatisfiableBlockSizeError:
                # BUG FIX: the two implicitly-concatenated string literals were
                # missing a separating space, producing the garbled message
                # "...to generatedangling_bounded_block_data[...]".
                logging.warning(
                    ("Slices are too small to generate "
                     "dangling_bounded_block_data[%s_%s] test case."),
                    dangle_type.name,
                    component,
                )
Esempio n. 16
0
def slice_padding_data(codec_features):
    """
    **Tests that padding bits in picture slices are ignored.**

    Picture slices (13.5.3) and (13.5.4) might contain padding bits beyond the
    end of the transform coefficients for each picture component. These test
    cases check that decoders correctly ignore these values. Padding values
    will be filled with the following:

    ``slice_padding_data[slice_?_all_zeros]``
        Padding bits are all zero.

    ``slice_padding_data[slice_?_all_ones]``
        Padding bits are all one.

    ``slice_padding_data[slice_?_alternating_1s_and_0s]``
        Padding bits alternate between one and zero, starting with one.

    ``slice_padding_data[slice_?_alternating_0s_and_1s]``
        Padding bits alternate between zero and one, starting with zero.

    ``slice_padding_data[slice_?_dummy_end_of_sequence]``
        Padding bits will contain bits which encode an end of sequence data
        unit (10.6).

    The above cases are repeated for the luma and color difference picture
    components as indicated by the value substituted for ``?`` in the test case
    names above. For low-delay pictures these will be ``Y`` (luma) and ``C``
    (interleaved color difference). For high quality pictures these will be
    ``Y`` (luma), ``C1`` (color difference 1) and ``C2`` (color difference 2).
    """
    # The values with which to fill padding data
    #
    # [(filler, byte_align, explanation), ...]
    filler_values = [
        (b"\x00", False, "all_zeros"),
        (b"\xFF", False, "all_ones"),
        (b"\xAA", False, "alternating_1s_and_0s"),
        (b"\x55", False, "alternating_0s_and_1s"),
        # A valid end-of-sequence data unit must be byte aligned, hence the
        # True byte_align flag for this filler
        (make_dummy_end_of_sequence(), True, "dummy_end_of_sequence"),
    ]

    # The picture components expected
    #
    # NOTE(review): picture_components is left unassigned if the profile is
    # neither high quality nor low delay -- presumably unreachable given the
    # supported profiles; confirm against callers.
    if codec_features["profile"] == Profiles.high_quality:
        picture_components = ["Y", "C1", "C2"]
    elif codec_features["profile"] == Profiles.low_delay:
        picture_components = ["Y", "C"]

    # Generate single-frame mid-gray sequences with the specified padding data
    base_sequence = make_sequence(
        codec_features,
        # These pictures encode to all zeros which should give the highest
        # possible compression.
        mid_gray(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        ),
    )

    # One test case per (filler pattern, picture component) pair; each mutates
    # a fresh deep copy of the base sequence in place.
    for filler, byte_align, explanation in filler_values:
        for component in picture_components:
            sequence = deepcopy(base_sequence)
            for (state, sx, sy, slice) in iter_slices_in_sequence(
                    codec_features,
                    sequence,
            ):
                if codec_features["profile"] == Profiles.high_quality:
                    # For lossless coding, extend the slice size to ensure some
                    # padding data is used
                    if codec_features["lossless"]:
                        min_length = (slice["slice_y_length"] +
                                      slice["slice_c1_length"] +
                                      slice["slice_c2_length"] + 8)
                    else:
                        min_length = 0

                    fill_hq_slice_padding(
                        state,
                        sx,
                        sy,
                        slice,
                        component,
                        filler,
                        byte_align,
                        min_length,
                    )
                elif codec_features["profile"] == Profiles.low_delay:
                    fill_ld_slice_padding(
                        state,
                        sx,
                        sy,
                        slice,
                        component,
                        filler,
                        byte_align,
                    )

            yield TestCase(
                Stream(sequences=[sequence]),
                "{}_{}".format(
                    component,
                    explanation,
                ),
            )
Esempio n. 17
0
def signal_range(codec_features):
    """
    **Tests that a decoder has sufficient numerical dynamic range.**

    These test cases contain a series of pictures containing test patterns
    designed to produce extreme signals within decoders. During these test
    cases, no integer clamping (except for final output clamping) or integer
    overflows must occur.

    A test case is produced for each picture component:

    ``signal_range[Y]``
        Luma component test patterns.

    ``signal_range[C1]``
        Color difference 1 component test patterns.

    ``signal_range[C2]``
        Color difference 2 component test patterns.

    These test cases are produced by encoding pictures consisting test patterns
    made up of entirely of legal (in range) signal values. Nevertheless, the
    resulting bitstreams produce large intermediate values within a decoder,
    though these are not guaranteed to be worst-case.

    .. note::

        For informational purposes, an example of a set of test patterns before
        and after encoding and quantisation is shown below:

        .. image:: /_static/user_guide/signal_range_decoder.svg

    .. note::

        The quantization indices used for lossy codecs are chosen to maximise
        the peak signal range produced by the test patterns. These are often
        higher than a typical VC-2 encoder might pick for a given bit rate but
        are nevertheless valid.

    An informative metadata file is provided along side each test case which
    gives, for each picture in the bitstream, the parts of a decoder which are
    being tested by the test patterns. See
    :py:class:`vc2_bit_widths.helpers.TestPoint` for details.
    """
    # Fetch the precomputed test patterns for this codec; bail out (generating
    # no test cases) if no static analysis exists for the wavelet in use.
    try:
        (
            analysis_luma_pictures,
            synthesis_luma_pictures,
            analysis_color_diff_pictures,
            synthesis_color_diff_pictures,
        ) = get_test_pictures(codec_features)
    except MissingStaticAnalysisError:
        logging.warning(
            (
                "No static analysis available for the wavelet "
                "used by codec '%s'. Signal range test cases cannot "
                "be generated."
            ),
            codec_features["name"],
        )
        return

    # One test case per picture component; the color difference patterns are
    # reused for both C1 and C2.
    for component, analysis_test_pictures, synthesis_test_pictures in [
        ("Y", analysis_luma_pictures, synthesis_luma_pictures),
        ("C1", analysis_color_diff_pictures, synthesis_color_diff_pictures),
        ("C2", analysis_color_diff_pictures, synthesis_color_diff_pictures),
    ]:
        # For lossless codecs we use the analysis test patterns since no
        # quantisation takes place
        if codec_features["lossless"]:
            test_pictures = analysis_test_pictures
        else:
            test_pictures = synthesis_test_pictures

        # Generate an initially empty set of mid-gray pictures
        one_gray_frame = list(
            mid_gray(
                codec_features["video_parameters"],
                codec_features["picture_coding_mode"],
            )
        )
        # Repeat enough frames to hold every test picture (ceiling division)
        pictures = list(
            repeat_pictures(
                one_gray_frame,
                ((len(test_pictures) + len(one_gray_frame) - 1) // len(one_gray_frame)),
            )
        )

        # Fill-in the test patterns, recording the quantisation index each
        # picture requires (0, i.e. no minimum, for lossless codecs)
        minimum_qindices = []
        for test_picture, picture in zip(test_pictures, pictures):
            picture[component] = test_picture.picture.tolist()
            if codec_features["lossless"]:
                minimum_qindices.append(0)
            else:
                minimum_qindices.append(test_picture.quantisation_index)
        # Pad with 0 for any trailing mid-gray-only pictures
        while len(minimum_qindices) < len(pictures):
            minimum_qindices.append(0)

        # Extract the testpoints in JSON-serialisable form
        metadata = [[tp._asdict() for tp in p.test_points] for p in test_pictures]

        # Encode
        sequence = make_sequence(
            codec_features,
            pictures,
            minimum_qindex=minimum_qindices,
        )

        # Check the desired qindex could be used (should only ever fail for
        # absurdly low bitrate configurations). The expected qindex advances
        # once per picture, i.e. at each (0, 0) slice.
        num_unexpected_qindices = 0
        expected_qindex = None
        expected_qindex_iter = iter(minimum_qindices)
        for _, sx, sy, slice in iter_slices_in_sequence(codec_features, sequence):
            if sx == 0 and sy == 0:
                expected_qindex = next(expected_qindex_iter, 0)
            if slice["qindex"] != expected_qindex:
                num_unexpected_qindices += 1

        if num_unexpected_qindices > 0:
            logging.warning(
                "Could not assign the required qindex to %d picture slices "
                "for signal range test case due to a small picture_bytes value. "
                "Peak signal levels might be reduced.",
                num_unexpected_qindices,
            )

        yield TestCase(
            Stream(sequences=[sequence]),
            component,
            metadata=metadata,
        )
Esempio n. 18
0
def lossless_quantization(codec_features):
    """
    **Tests support for quantization in lossless decoders.**

    Quantization can, in principle, be used in lossless coding modes in cases
    where all transform coefficients are divisible by the same factor. This
    test case contains a synthetic test pattern with this property.

    This test case is only generated for lossless codecs.

    .. note::

        For informational purposes, an example decoded test pattern is shown
        below:

        .. image:: /_static/user_guide/lossless_quantization.png

        Note the faint repeating pattern.
    """
    # Don't bother with this test for lossy coding modes (quantization is
    # tested elsewhere)
    if not codec_features["lossless"]:
        return None

    # Pick a non-zero qindex which will ensure all transform coefficients, when
    # set to 1 in the bitstream, will dequantize to different values (when the
    # quant matrix entry is different).
    quant_matrix = get_quantization_marix(codec_features)
    qindex = compute_qindex_with_distinct_quant_factors(quant_matrix)

    # Start with a mid-gray frame (coeffs set to 0). We'll hand-modify this to
    # contain all 1s because a picture which does this may be slightly larger
    # than the unclipped picture size and therefore we can't rely on the
    # encoder to produce such a signal.
    sequence = make_sequence(
        codec_features,
        mid_gray(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        ),
    )

    # Set qindex and all transform coefficients to 1
    #
    # The all-ones coefficients occupy more bits than the all-zeros they
    # replace, so each slice's per-component length field must be recomputed.
    # The largest recomputed length is tracked so a suitable slice size scaler
    # can be chosen below.
    max_length = 0
    for _state, _sx, _sy, hq_slice in iter_slices_in_sequence(codec_features, sequence):
        hq_slice["qindex"] = qindex
        for c in ["y", "c1", "c2"]:
            hq_slice["{}_transform".format(c)] = [
                1 for _ in hq_slice["{}_transform".format(c)]
            ]
            length = calculate_hq_length_field(hq_slice["{}_transform".format(c)], 1)
            hq_slice["slice_{}_length".format(c)] = length
            max_length = max(length, max_length)

    # Update slice size scaler to keep all length fields to 8 bits or fewer
    slice_size_scaler = max(1, (max_length + 254) // 255)
    for transform_parameters in iter_transform_parameters_in_sequence(
        codec_features, sequence
    ):
        transform_parameters["slice_parameters"][
            "slice_size_scaler"
        ] = slice_size_scaler
    # Re-express every length field in units of the new slice size scaler,
    # rounding up (ceiling division) so that no slice data is truncated.
    for _state, _sx, _sy, hq_slice in iter_slices_in_sequence(codec_features, sequence):
        for c in ["y", "c1", "c2"]:
            hq_slice["slice_{}_length".format(c)] += slice_size_scaler - 1
            hq_slice["slice_{}_length".format(c)] //= slice_size_scaler

    # If the resulting picture clips, give up on this test case. We make the
    # assumption that while a clever lossless encoder may use quantization it
    # is unlikely to rely on signal clipping in the decoder. As a consequence,
    # to avoid producing a test case which a decoder might reasonably fail to
    # decode due to internal signal width limitations, we bail.
    #
    # In practice, even for the largest VC-2 filters, transform depths and
    # wonkiest quantisation matrices, the generated signals should fit (very)
    # comfortably into 8 bit video signal ranges. As such, if this check fails
    # it is very likely a highly degenerate codec configuration has been
    # specified.
    if check_for_signal_clipping(sequence):
        logging.warning(
            "The lossless_quantization test case generator could not produce a "
            "losslessly compressible image and has been omitted. This probably "
            "means an (impractically) high transform depth or custom quantisation "
            "matrix entry or an extremely low picture bit depth was used."
        )
        return None

    return Stream(sequences=[sequence])
# Example 19
def extended_transform_parameters(codec_features):
    """
    **Tests that extended transform parameter flags are handled correctly.**

    Ensures that extended transform parameters fields (12.4.4) are correctly
    handled by decoders for symmetric transform modes.

    ``extended_transform_parameters[asym_transform_index_flag]``
        Verifies that ``asym_transform_index_flag`` can be set to ``1``.

    ``extended_transform_parameters[asym_transform_flag]``
        Verifies that ``asym_transform_flag`` can be set to ``1``.

    These test cases are skipped for streams whose major version is less than 3
    (which do not support the extended transform parameters header).
    Additionally, these test cases are skipped for asymmetric transforms when
    the flag being tested must already be ``1``.
    """
    # Encode a simple sequence; its extended transform parameters will be
    # substituted below.
    base_sequence = make_sequence(
        codec_features,
        static_sprite(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        ),
    )

    # An extended transform parameters field is only present when the stream's
    # major_version is 3, so skip these tests otherwise.
    autofill_major_version(Stream(sequences=[base_sequence]))
    major_versions = [
        data_unit["sequence_header"]["parse_parameters"]["major_version"]
        for data_unit in base_sequence["data_units"]
        if data_unit["parse_info"]["parse_code"] == ParseCodes.sequence_header
    ]
    if major_versions[0] != 3:
        return

    constrained_values = codec_features_to_trivial_level_constraints(codec_features)

    # Attempt to force each flag to 1 in turn (where the current level permits
    # it and where the flag wasn't already 1 in the base sequence).
    flag_cases = [
        (
            "asym_transform_index_flag",
            {"wavelet_index_ho": codec_features["wavelet_index_ho"]},
        ),
        (
            "asym_transform_flag",
            {"dwt_depth_ho": codec_features["dwt_depth_ho"]},
        ),
    ]
    for flag_name, extra_kwargs in flag_cases:
        if True not in allowed_values_for(
            LEVEL_CONSTRAINTS, flag_name, constrained_values
        ):
            continue
        kwargs = dict(extra_kwargs)
        kwargs[flag_name] = True
        sequence, changed = update_extended_transform_parameters(
            base_sequence, **kwargs
        )
        if changed:
            yield TestCase(Stream(sequences=[sequence]), flag_name)
# Example 20
def padding_data(codec_features):
    """
    **Tests that the contents of padding data units are ignored.**

    This test case consists of a sequence containing two blank frames in which
    every-other data unit is a padding data unit (10.4.5) of various lengths
    and contents (described below).

    ``padding_data[empty]``
        Padding data units containing zero padding bytes (i.e. just consisting
        of a parse info header).

    ``padding_data[zero]``
        Padding data units containing 32 bytes set to 0x00.

    ``padding_data[non_zero]``
        Padding data units containing 32 bytes containing the ASCII encoding of
        the text ``Ignore this padding data please!``.

    ``padding_data[dummy_end_of_sequence]``
        Padding data units containing 32 bytes containing an encoding of an end
        of sequence data unit (10.4.1).

    Where padding data units are not permitted by the VC-2 level in use, these
    test cases are omitted.
    """
    # At least two pictures are always included so that a decoder which stops
    # prematurely (e.g. at a dummy end of sequence) produces obviously wrong
    # output.
    try:
        base_sequence = make_sequence(
            codec_features,
            repeat_pictures(
                mid_gray(
                    codec_features["video_parameters"],
                    codec_features["picture_coding_mode"],
                ),
                2,
            ),
            # Interleave a padding data unit with every other data unit
            "sequence_header (padding_data .)* padding_data end_of_sequence $",
        )
    except IncompatibleLevelAndDataUnitError:
        # The level in use forbids padding data units: skip the test entirely
        return

    padding_payloads = [
        ("empty", b""),
        ("zero", b"\x00" * 32),
        ("non_zero", b"Ignore this padding data please!"),
        ("dummy_end_of_sequence", make_dummy_end_of_sequence().ljust(32, b"\x00")),
    ]
    for case_name, payload in padding_payloads:
        stream = Stream(sequences=[replace_padding_data(base_sequence, payload)])
        yield TestCase(stream, case_name)
# Example 21
def source_parameters_encodings(codec_features):
    """
    **Tests the decoder can decode different encodings of the video format
    metadata.**

    This series of test cases each contain the same source parameters (11.4),
    but in different ways.

    ``source_parameters_encodings[custom_flags_combination_?_base_video_format_?]``
        For these test cases, the base video format which most closely matches
        the desired video format is used. Each test case incrementally checks
        that source parameters can be explicitly set to their desired values
        (e.g. by setting ``custom_*_flag`` bits to 1).

    ``source_parameters_encodings[base_video_format_?]``
        These test cases, check that other base video formats can be used (and
        overridden) to specify the desired video format. Each of these test
        cases will explicitly specify as few video parameters as possible (e.g.
        setting as many ``custom_*_flag`` fields to 0 as possible).

    .. tip::

        The :ref:`vc2-bitstream-viewer` can be used to display the encoding
        used in a given test case as follows::

            $ vc2-bitstream-viewer --show sequence_header path/to/test_case.vc2

    .. note::

        Some VC-2 levels constrain the allowed encoding of source parameters in
        the bit stream and so fewer test cases will be produced in this
        instance.

    .. note::

        Not all base video formats can be used as the basis for encoding a
        specific video format. For example, the 'top field first' flag (11.3)
        set by a base video format cannot be overridden. As a result, test
        cases will not include every base video format index.

    """
    # Encode a base sequence whose sequence headers will be substituted below
    base_sequence = make_sequence(
        codec_features,
        static_sprite(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        ),
    )

    # To keep the number of tests sensible, we'll include all sequence header
    # encodings using the best-matching base video format followed by the
    # least-custom-overridden encoding for all other base video formats. This
    # checks out as many 'custom' flags as possible (against the best-matching
    # base video format) and also checks (as best possible) the other base
    # video format values are correct.
    best_format = None
    previous_format = None
    for index, sequence_header in enumerate(iter_sequence_headers(codec_features)):
        this_format = sequence_header["base_video_format"]

        # iter_sequence_headers yields headers using the best-matching base
        # video format first
        if best_format is None:
            best_format = this_format

        # Headers are grouped consecutively by base video format; the first
        # example in each group uses the fewest possible 'custom' flags and so
        # best tests that the decoder's base video format parameters are
        # correct.
        is_first_in_group = this_format != previous_format
        previous_format = this_format

        if this_format == best_format:
            case_name = "custom_flags_combination_{}_base_video_format_{:d}".format(
                index + 1,
                this_format,
            )
        elif is_first_in_group:
            case_name = "base_video_format_{:d}".format(this_format)
        else:
            continue

        yield TestCase(
            Stream(
                sequences=[replace_sequence_headers(base_sequence, sequence_header)]
            ),
            case_name,
        )
# Example 22
def slice_prefix_bytes(codec_features):
    """
    **Tests the decoder can handle a non-zero number of slice prefix bytes.**

    Produces test cases with a non-zero number of slice prefix bytes
    containing the following values:

    ``slice_prefix_bytes[zeros]``
        All slice prefix bytes are 0x00.

    ``slice_prefix_bytes[ones]``
        All slice prefix bytes are 0xFF.

    ``slice_prefix_bytes[end_of_sequence]``
        All slice prefix bytes contain bits which encode an end of sequence
        data unit (10.4).

    These test cases apply only to the high quality profile and are omitted
    when the low delay profile is used.
    """
    # This test only applies to high quality codecs
    if codec_features["profile"] != Profiles.high_quality:
        return

    # Determine which slice_prefix_bytes values the level in use permits
    constrained_values = codec_features_to_trivial_level_constraints(codec_features)
    allowed_slice_prefix_bytes = allowed_values_for(
        LEVEL_CONSTRAINTS, "slice_prefix_bytes", constrained_values
    )

    # Picture content is irrelevant to this test; a mid-gray frame encodes to
    # all-zero transform coefficients, leaving maximum room for prefix bytes.
    mid_gray_pictures = list(
        mid_gray(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        )
    )

    # (test case name, filler byte pattern used to build the prefix bytes)
    test_cases = [
        ("zeros", b"\x00"),
        ("ones", b"\xFF"),
        ("end_of_sequence", make_dummy_end_of_sequence()),
    ]

    for description, filler in test_cases:
        sequence = make_sequence(codec_features, mid_gray_pictures)

        # Determine how many slice prefix bytes we can fit in our slices
        if codec_features["lossless"]:
            # Lossless slices can be as large as we like; assign enough slice
            # bytes for the full set of filler bytes
            slice_prefix_bytes = len(filler)
        else:
            # Find the space assigned for coefficients in the smallest slice in
            # a fixed-bit-rate stream; we'll replace all slice coefficients
            # with slice prefix bytes.
            slice_prefix_bytes = min(
                (
                    hq_slice["slice_y_length"]
                    + hq_slice["slice_c1_length"]
                    + hq_slice["slice_c2_length"]
                )
                * state["slice_size_scaler"]
                for state, sx, sy, hq_slice in iter_slices_in_sequence(
                    codec_features, sequence
                )
            )

        # Check level constraints allow this slice_prefix_bytes
        #
        # NB: This implementation assumes that either the slice_prefix_bytes
        # field is required to be zero or it is free to be any value. This
        # assumption is verified for all existing VC-2 levels in the tests for
        # this module. Should this assumption be violated, more sophisticated
        # behaviour will be required here...
        if slice_prefix_bytes not in allowed_slice_prefix_bytes:
            continue

        # The test case is still emitted in this case, just with fewer (but
        # still non-zero) prefix bytes than filler bytes.
        if slice_prefix_bytes < len(filler):
            logging.warning(
                (
                    "Codec '%s' has a very small picture_bytes value "
                    "meaning the slice_prefix_bytes[%s] test case might not "
                    "be as useful as intended."
                ),
                codec_features["name"],
                description,
            )

        # Set the number of slice prefix bytes in all slice parameter headers
        for slice_parameters in iter_slice_parameters_in_sequence(sequence):
            assert slice_parameters["slice_prefix_bytes"] == 0
            slice_parameters["slice_prefix_bytes"] = slice_prefix_bytes

        # Add prefix bytes to all slices
        #
        # The filler pattern is repeated then truncated to exactly
        # slice_prefix_bytes bytes.
        prefix_bytes = (filler * slice_prefix_bytes)[:slice_prefix_bytes]
        for state, sx, sy, hq_slice in iter_slices_in_sequence(
            codec_features, sequence
        ):
            hq_slice["prefix_bytes"] = prefix_bytes

            # Keep overall slice size the same for lossy (constant bit rate)
            # modes
            if not codec_features["lossless"]:
                total_length = (
                    hq_slice["slice_y_length"]
                    + hq_slice["slice_c1_length"]
                    + hq_slice["slice_c2_length"]
                )

                # NOTE(review): floor division here presumes slice_prefix_bytes
                # is a whole number of slice_size_scaler units (it was derived
                # from scaled length fields above, so this holds) -- confirm if
                # that derivation ever changes.
                total_length -= slice_prefix_bytes // state["slice_size_scaler"]

                # Remaining coefficient space is arbitrarily assigned entirely
                # to the c2 component; zero-length y/c1 components are valid.
                hq_slice["slice_y_length"] = 0
                hq_slice["slice_c1_length"] = 0
                hq_slice["slice_c2_length"] = total_length

        yield TestCase(Stream(sequences=[sequence]), description)