Example #1
def real_pictures(codec_features):
    """
    **Tests real pictures are decoded correctly.**

    A series of three still photographs.

    .. image:: /_static/user_guide/real_pictures.svg

    .. note::

        The images encoded in this sequence are generated from 4256 by 2832
        pixel, 4:4:4, 16 bit, standard dynamic range, RGB color images with the
        ITU-R BT.709 gamut. As such, the decoded pictures might be of reduced
        technical quality compared with the capabilities of the format. The
        rescaling, color conversion and encoding algorithms used are also basic
        in nature, potentially further reducing the picture quality.
    """
    return Stream(
        sequences=[
            make_sequence(
                codec_features,
                picture_generators.real_pictures(
                    codec_features["video_parameters"],
                    codec_features["picture_coding_mode"],
                ),
            )
        ]
    )
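
# A minimal usage sketch (not part of the original module): writing the
# generated test case stream to a file. It assumes the
# autofill_and_serialise_stream helper seen in later examples is importable
# here; the output filename is illustrative only.
def write_real_pictures_example(codec_features, filename="real_pictures.vc2"):
    with open(filename, "wb") as f:
        autofill_and_serialise_stream(f, real_pictures(codec_features))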
Example #2
def absent_next_parse_offset(codec_features):
    """
    **Tests handling of missing 'next parse offset' field.**

    The 'next parse offset' field of the ``parse_info`` header (see (10.5.1))
    can be set to zero (i.e. omitted) for pictures. This test case verifies
    that decoders are still able to decode streams with this field absent.
    """
    sequence = make_sequence(
        codec_features,
        repeat_pictures(
            mid_gray(
                codec_features["video_parameters"],
                codec_features["picture_coding_mode"],
            ),
            2,
        ),
    )

    # Prevent the next parse offsets of picture-containing data units from
    # being automatically filled in during serialisation
    for data_unit in sequence["data_units"]:
        parse_info = data_unit["parse_info"]
        if parse_info["parse_code"] in (
                ParseCodes.low_delay_picture,
                ParseCodes.high_quality_picture,
                ParseCodes.low_delay_picture_fragment,
                ParseCodes.high_quality_picture_fragment,
        ):
            parse_info["next_parse_offset"] = 0

    return Stream(sequences=[sequence])
Example #3
def repeated_sequence_headers(codec_features):
    """
    **Tests the decoder can handle a stream with repeated sequence headers.**

    This test case consists of a sequence containing two frames in which the
    sequence header is repeated before every picture.

    This test will be omitted if the VC-2 level prohibits the repetition of the
    sequence header.
    """
    try:
        # Generate a base sequence in which we'll replace the sequence headers
        # later. We use at least two pictures so that pictures and sequence
        # headers end up interleaved.
        sequence = make_sequence(
            codec_features,
            repeat_pictures(
                static_sprite(
                    codec_features["video_parameters"],
                    codec_features["picture_coding_mode"],
                ),
                2,
            ),
            # Force an extra sequence header between every data unit
            "(sequence_header .)+",
        )
    except IncompatibleLevelAndDataUnitError:
        # Skip levels which don't permit this degree of sequence header
        # interleaving rather than trying to force them to accept it.
        return None

    return Stream(sequences=[sequence])
Example #4
    def test_pictures(self, parse_code):
        stream = Stream(
            sequences=[
                Sequence(
                    data_units=[
                        # First in sequence should be auto-numbered to expected start
                        # offset
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            picture_parse=PictureParse(
                                picture_header=PictureHeader(picture_number=AUTO)
                            ),
                        ),
                        # If picture number not mentioned, it should be autofilled
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            picture_parse=PictureParse(picture_header=PictureHeader()),
                        ),
                        # If explicit picture number given, should be used
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            picture_parse=PictureParse(
                                picture_header=PictureHeader(picture_number=0xFFFFFFFE)
                            ),
                        ),
                        # Should continue from last explicit number if given
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            picture_parse=PictureParse(
                                picture_header=PictureHeader(picture_number=AUTO)
                            ),
                        ),
                        # Should wrap-around
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            picture_parse=PictureParse(
                                picture_header=PictureHeader(picture_number=AUTO)
                            ),
                        ),
                    ]
                )
            ]
        )

        autofill_picture_number(stream, 1234)

        picture_numbers = [
            data_unit["picture_parse"]["picture_header"]["picture_number"]
            for seq in stream["sequences"]
            for data_unit in seq["data_units"]
        ]
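        # Picture numbers are 32-bit: after the explicit 0xFFFFFFFE the
        # auto-numbered pictures continue to 0xFFFFFFFF and then wrap to 0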
        assert picture_numbers == [
            1234,
            1235,
            0xFFFFFFFE,
            0xFFFFFFFF,
            0x0,
        ]
Example #5
def picture_numbers(codec_features):
    """
    **Tests picture numbers are correctly read from the bitstream.**

    Each test case contains 8 blank pictures numbered in a particular way.

    ``picture_numbers[start_at_zero]``
        The first picture has picture number 0.

    ``picture_numbers[non_zero_start]``
        The first picture has picture number 1000.

    ``picture_numbers[wrap_around]``
        The first picture has picture number 4294967292, with the picture
        numbers wrapping around to 0 on the 5th picture in the sequence.

    ``picture_numbers[odd_first_picture]``
        The first picture has picture number 7. This test case is only included
        when the picture coding mode is 0 (i.e. pictures are frames) since the
        first field of each frame must have an even number when the picture
        coding mode is 1 (i.e. pictures are fields) (11.5).
    """
    # Create a sequence with at least 8 pictures (and 4 frames)
    mid_gray_pictures = list(
        mid_gray(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        ))
    mid_gray_pictures = list(
        repeat_pictures(
            mid_gray_pictures,
            8 // len(mid_gray_pictures),
        ))

    test_cases = [
        ("start_at_zero", [0, 1, 2, 3, 4, 5, 6, 7]),
        ("non_zero_start", [1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007]),
        ("wrap_around",
         [4294967292, 4294967293, 4294967294, 4294967295, 0, 1, 2, 3]),
    ]

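    # The odd_first_picture case only applies when pictures are frames; when
    # pictures are fields, the first field of each frame must have an even
    # picture number (11.5)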
    if (
        codec_features["picture_coding_mode"]
        == PictureCodingModes.pictures_are_frames
    ):
        test_cases.append(("odd_first_picture", [7, 8, 9, 10, 11, 12, 13, 14]))

    for description, picture_numbers in test_cases:
        yield TestCase(
            Stream(sequences=[
                make_sequence(
                    codec_features,
                    [
                        dict(picture, pic_num=pic_num) for picture, pic_num in
                        zip(mid_gray_pictures, picture_numbers)
                    ],
                )
            ]),
            description,
        )
Example #6
def slice_size_scaler(codec_features):
    """
    **Tests that the 'slice_size_scaler' field is correctly handled.**

    This test case generates a sequence which sets the slice_size_scaler value
    (13.5.4) to be 1 larger than it otherwise would be.

    This test case is only generated for the high quality profile, and levels
    which permit a slice size scaler value greater than 1.
    """
    # Skip if not high quality profile
    if codec_features["profile"] != Profiles.high_quality:
        return None

    # Pick a minimum slice size scaler which is larger than the slice size
    # scaler which would otherwise be used
    if codec_features["lossless"]:
        # We're just going to code mid-gray frames which compress to 0 bytes so
        # slice size scaler = 1 is always sufficient.
        minimum_slice_size_scaler = 2
    else:
        minimum_slice_size_scaler = (
            get_safe_lossy_hq_slice_size_scaler(
                codec_features["picture_bytes"],
                codec_features["slices_x"] * codec_features["slices_y"],
            )
            + 1
        )

    # Skip if level prohibits non-1 slice size scaler
    if minimum_slice_size_scaler not in allowed_values_for(
        LEVEL_CONSTRAINTS,
        "slice_size_scaler",
        codec_features_to_trivial_level_constraints(codec_features),
    ):
        return None

    sequence = make_sequence(
        codec_features,
        mid_gray(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        ),
        minimum_slice_size_scaler=minimum_slice_size_scaler,
    )

    # Force lossless coding modes to use a non-zero number of bytes for each
    # slice's coefficients (so that slice_size_scaler actually has to be used).
    if codec_features["lossless"]:
        for _state, _sx, _sy, hq_slice in iter_slices_in_sequence(
            codec_features, sequence
        ):
            assert hq_slice["slice_c2_length"] == 0
            hq_slice["slice_c2_length"] = 1

    return Stream(sequences=[sequence])
Example #7
    def test_insertion_of_parse_parameters_when_absent(self):
        stream = Stream(
            sequences=[
                Sequence(
                    data_units=[
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.sequence_header
                            )
                        )
                    ]
                )
            ]
        )
        autofill_major_version(stream)

        # A parse parameters field (and the inferred version number) should
        # have been inserted (since the major_version field defaults to AUTO)
        assert stream == Stream(
            sequences=[
                Sequence(
                    data_units=[
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.sequence_header
                            ),
                            sequence_header=SequenceHeader(
                                parse_parameters=ParseParameters(
                                    major_version=2,
                                ),
                            ),
                        )
                    ]
                )
            ]
        )
Example #8
    def test_padding_and_aux_data_default_data(self, parse_code):
        # No data given (default (empty) data should be assumed)
        stream = Stream(
            sequences=[
                Sequence(
                    data_units=[DataUnit(parse_info=ParseInfo(parse_code=parse_code))]
                )
            ]
        )

        assert autofill_parse_offsets(stream) == ([], [(0, 0)])

        parse_info = stream["sequences"][0]["data_units"][0]["parse_info"]
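        # A parse_info block is 13 bytes long (10.5.1), so with an empty
        # padding/auxiliary data payload the next parse offset is just the
        # size of the parse_info header itself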
        assert parse_info["next_parse_offset"] == 13
        assert parse_info["previous_parse_offset"] == 0
Example #9
    def test_padding_and_aux_data(self, parse_code):
        stream = Stream(
            sequences=[
                Sequence(
                    data_units=[
                        # Next parse offset not given (should be treated as auto)
                        DataUnit(parse_info=ParseInfo(parse_code=parse_code)),
                        # Next parse offset is explicitly AUTO
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=parse_code,
                                next_parse_offset=AUTO,
                            ),
                        ),
                        # Next parse offset is given (should not be modified)
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=parse_code,
                                next_parse_offset=100,
                            ),
                        ),
                    ]
                )
            ]
        )
        for seq in stream["sequences"]:
            for data_unit in seq["data_units"]:
                if parse_code == tables.ParseCodes.padding_data:
                    data_unit["padding"] = Padding(bytes=b"1234")
                elif parse_code == tables.ParseCodes.auxiliary_data:
                    data_unit["auxiliary_data"] = AuxiliaryData(bytes=b"1234")

        assert autofill_parse_offsets(stream) == ([], [(0, 0), (0, 1), (0, 2)])

        next_parse_offsets = [
            data_unit["parse_info"]["next_parse_offset"]
            for seq in stream["sequences"]
            for data_unit in seq["data_units"]
        ]
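        # 13 bytes of parse_info header plus the 4 bytes of padding/auxiliary
        # data; the explicitly given offset of 100 is left untouched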
        assert next_parse_offsets == [13 + 4, 13 + 4, 100]

        previous_parse_offsets = [
            data_unit["parse_info"]["previous_parse_offset"]
            for seq in stream["sequences"]
            for data_unit in seq["data_units"]
        ]
        assert previous_parse_offsets == [0, 0, 0]
Example #10
    def test_minimal_sequence_unmodified(self):
        stream = Stream(
            sequences=[
                Sequence(
                    data_units=[
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.end_of_sequence
                            )
                        )
                    ]
                )
            ]
        )
        stream_orig = deepcopy(stream)
        autofill_major_version(stream)
        assert stream == stream_orig
Example #11
def static_noise(codec_features):
    """
    **Tests that the decoder correctly decodes a noise plate.**

    A static frame containing pseudo-random uniform noise as illustrated below:

    .. image:: /_static/user_guide/noise.png
    """
    return Stream(sequences=[
        make_sequence(
            codec_features,
            white_noise(
                codec_features["video_parameters"],
                codec_features["picture_coding_mode"],
            ),
        )
    ])
Example #12
def concatenated_sequences(codec_features):
    """
    **Tests that streams containing multiple concatenated sequences can be
    decoded.**

    A stream consisting of the concatenation of two sequences (10.3) with one
    frame each. In both sequences the first picture is given picture number
    zero.
    """
    sequence = make_sequence(
        codec_features,
        mid_gray(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        ),
    )

    return Stream(sequences=[sequence, deepcopy(sequence)])
Example #13
    def test_length_unchanged_for_non_lossless(
        self, fragment_slice_count, picture_bytes
    ):
        codec_features = CodecFeatures(
            MINIMAL_CODEC_FEATURES,
            profile=Profiles.high_quality,
            picture_bytes=picture_bytes,
            fragment_slice_count=fragment_slice_count,
        )

        # Get length of sequence containing no prefix bytes
        f = BytesIO()
        autofill_and_serialise_stream(
            f,
            Stream(
                sequences=[
                    make_sequence(
                        codec_features,
                        mid_gray(
                            codec_features["video_parameters"],
                            codec_features["picture_coding_mode"],
                        ),
                    )
                ]
            ),
        )
        expected_data_unit_lengths = deserialise_and_measure_slice_data_unit_sizes(
            f.getvalue()
        )
        # Sanity check the deserialise_and_measure_slice_data_unit_sizes
        # function is working...
        assert len(expected_data_unit_lengths) >= 1

        test_cases = list(slice_prefix_bytes(codec_features))

        assert len(test_cases) == 3

        for test_case in test_cases:
            f = BytesIO()
            autofill_and_serialise_stream(f, test_case.value)
            data_unit_lengths = deserialise_and_measure_slice_data_unit_sizes(
                f.getvalue()
            )
            assert data_unit_lengths == expected_data_unit_lengths
Example #14
def serialize_and_decode(sequence):
    # Serialise
    f = BytesIO()
    autofill_and_serialise_stream(f, Stream(sequences=[sequence]))

    # Setup callback to capture decoded pictures
    decoded_pictures = []

    def output_picture_callback(picture, video_parameters,
                                picture_coding_mode):
        decoded_pictures.append(picture)

    # Feed to conformance checking decoder
    f.seek(0)
    state = State(_output_picture_callback=output_picture_callback)
    init_io(state, f)
    parse_stream(state)

    return decoded_pictures
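
# A hedged usage sketch (not part of the original module): build the mid-gray
# test sequence used in other examples above and run it through the
# conformance checking decoder via serialize_and_decode().
def decode_mid_gray_example(codec_features):
    sequence = make_sequence(
        codec_features,
        mid_gray(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        ),
    )
    # Returns the list of pictures captured by the output picture callback
    return serialize_and_decode(sequence)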
Example #15
def check_for_signal_clipping(sequence):
    """
    Given a :py:class:`vc2_conformance.bitstream.Sequence`, return True if any
    picture component signal may have been clipped during decoding.
    """
    # NB: Internally we just check for saturated signal levels. This way we
    # avoid the need to modify the decoder to remove the clipper and all that
    # faff...

    # Serialise
    f = BytesIO()
    # NB: Deepcopy required due to autofill_and_serialise_stream mutating the
    # stream
    stream = Stream(sequences=[deepcopy(sequence)])
    autofill_and_serialise_stream(f, stream)
    f.seek(0)

    # Decode and look for saturated pixel values
    state = State()
    may_have_clipped = [False]

    def output_picture_callback(picture, video_parameters, picture_coding_mode):
        components_and_depths = [
            ("Y", state["luma_depth"]),
            ("C1", state["color_diff_depth"]),
            ("C2", state["color_diff_depth"]),
        ]

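        # A decoded sample sitting exactly at 0 or at (1 << depth) - 1 is at
        # the limit of the signal range and may have been clipped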
        for component, depth in components_and_depths:
            min_value = min(min(row) for row in picture[component])
            max_value = max(max(row) for row in picture[component])
            if min_value == 0:
                may_have_clipped[0] = True
            if max_value == (1 << depth) - 1:
                may_have_clipped[0] = True

    state["_output_picture_callback"] = output_picture_callback
    init_io(state, f)
    parse_stream(state)

    return may_have_clipped[0]
Example #16
def static_gray(codec_features):
    """
    **Tests that the decoder can decode a maximally compressible sequence.**

    This sequence contains an image in which every transform coefficient is
    zero. For most color specifications (11.4.10), this decodes to a mid-gray
    frame.

    This special case image is maximally compressible since no transform
    coefficients need to be explicitly coded in the bitstream. For lossless
    coding modes, this will also produce the smallest possible
    bitstream.
    """
    return Stream(sequences=[
        make_sequence(
            codec_features,
            mid_gray(
                codec_features["video_parameters"],
                codec_features["picture_coding_mode"],
            ),
        )
    ])
Example #17
    def test_manual_version_numbers_unaltered(self):
        # The following stream includes a (conflicting) major version of 2 and
        # defined extended transform parameters. Because the major version is
        # explicit, the auto filler should just ignore the conflicting
        # version/ETP.
        stream = Stream(
            sequences=[
                Sequence(
                    data_units=[
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.sequence_header
                            ),
                            sequence_header=SequenceHeader(
                                parse_parameters=ParseParameters(
                                    major_version=2,
                                ),
                            ),
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.high_quality_picture_fragment
                            ),
                            fragment_parse=FragmentParse(
                                transform_parameters=TransformParameters(
                                    extended_transform_parameters=ExtendedTransformParameters(
                                        asym_transform_flag=True,
                                        dwt_depth_ho=2,
                                    ),
                                ),
                            ),
                        ),
                    ]
                )
            ]
        )
        stream_orig = deepcopy(stream)
        autofill_major_version(stream)
        assert stream == stream_orig
Example #18
    def test_values_to_be_set_later_are_set_to_zero(self, parse_code, explicit_auto):
        stream = Stream(
            sequences=[
                Sequence(
                    data_units=[
                        # An automatically set data unit
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=parse_code,
                            )
                        ),
                        # One which is explicitly set (and should not be overridden)
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=parse_code,
                                next_parse_offset=100,
                                previous_parse_offset=200,
                            )
                        ),
                    ]
                )
            ]
        )
        if explicit_auto:
            parse_info = stream["sequences"][0]["data_units"][0]["parse_info"]
            parse_info["next_parse_offset"] = AUTO
            parse_info["previous_parse_offset"] = AUTO

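        # Each returned (sequence index, data unit index) pair marks an offset
        # which still needs to be filled in after serialisation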
        assert autofill_parse_offsets(stream) == ([(0, 0)], [(0, 0)])

        parse_info_0 = stream["sequences"][0]["data_units"][0]["parse_info"]
        assert parse_info_0["next_parse_offset"] == 0
        assert parse_info_0["previous_parse_offset"] == 0

        parse_info_1 = stream["sequences"][0]["data_units"][1]["parse_info"]
        assert parse_info_1["next_parse_offset"] == 100
        assert parse_info_1["previous_parse_offset"] == 200
Example #19
    def test_multiple_sequences(self, parse_code):
        stream = Stream(
            sequences=[
                Sequence(
                    data_units=[
                        DataUnit(parse_info=ParseInfo(parse_code=parse_code)),
                        DataUnit(parse_info=ParseInfo(parse_code=parse_code)),
                        DataUnit(parse_info=ParseInfo(parse_code=parse_code)),
                    ]
                ),
                Sequence(
                    data_units=[
                        DataUnit(parse_info=ParseInfo(parse_code=parse_code)),
                        DataUnit(parse_info=ParseInfo(parse_code=parse_code)),
                        DataUnit(parse_info=ParseInfo(parse_code=parse_code)),
                    ]
                ),
            ]
        )

        autofill_picture_number(stream, 1234)

        picture_numbers = [
            data_unit["picture_parse"]["picture_header"]["picture_number"]
            for seq in stream["sequences"]
            for data_unit in seq["data_units"]
        ]
        assert picture_numbers == [
            1234,
            1235,
            1236,
            # Restarts in second sequence
            1234,
            1235,
            1236,
        ]
Example #20
    def test_finalizer_works(self):
        f = BytesIO()
        w = BitstreamWriter(f)

        # Sequence with every data unit type and fully automatic numbers
        stream = Stream(
            sequences=[
                Sequence(
                    data_units=[
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.sequence_header
                            ),
                            sequence_header=SequenceHeader(
                                parse_parameters=ParseParameters(major_version=3),
                                video_parameters=SourceParameters(
                                    # Tiny custom frame-size used to reduce test suite
                                    # runtime
                                    frame_size=FrameSize(
                                        custom_dimensions_flag=True,
                                        frame_width=4,
                                        frame_height=4,
                                    )
                                ),
                            ),
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.high_quality_picture
                            ),
                            picture_parse=PictureParse(
                                picture_header=PictureHeader(picture_number=0)
                            ),
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.low_delay_picture
                            ),
                            picture_parse=PictureParse(
                                picture_header=PictureHeader(picture_number=0)
                            ),
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.high_quality_picture_fragment
                            ),
                            fragment_parse=FragmentParse(
                                fragment_header=FragmentHeader(picture_number=0)
                            ),
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.high_quality_picture_fragment
                            ),
                            fragment_parse=FragmentParse(
                                fragment_header=FragmentHeader(picture_number=0)
                            ),
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.padding_data
                            ),
                            padding=Padding(bytes=b"123"),
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.auxiliary_data
                            ),
                            auxiliary_data=AuxiliaryData(bytes=b"123"),
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.end_of_sequence
                            ),
                        ),
                    ]
                )
            ]
        )

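        # First pass: zero out the offsets to be filled in later and record
        # where they live in the stream structure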
        (
            next_parse_offsets_to_autofill,
            previous_parse_offsets_to_autofill,
        ) = autofill_parse_offsets(stream)

        with Serialiser(w, stream, vc2_default_values_with_auto) as serdes:
            vc2.parse_stream(serdes, State())
        w.flush()

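        # Finalising rewrites the recorded offset fields in place; it must not
        # move the write pointer (checked below)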
        offset_before = w.tell()
        autofill_parse_offsets_finalize(
            w,
            serdes.context,
            next_parse_offsets_to_autofill,
            previous_parse_offsets_to_autofill,
        )
        assert w.tell() == offset_before

        f.seek(0)
        r = BitstreamReader(f)
        with Deserialiser(r) as serdes:
            vc2.parse_stream(serdes, State())

        parse_infos = [
            data_unit["parse_info"]
            for sequence in serdes.context["sequences"]
            for data_unit in sequence["data_units"]
        ]

        # Check for start/end offsets being zero
        assert parse_infos[0]["previous_parse_offset"] == 0
        assert parse_infos[-1]["next_parse_offset"] == 0

        # Check for consistency and plausibility of offsets
        for pi1, pi2 in zip(parse_infos, parse_infos[1:]):
            assert pi1["next_parse_offset"] > 13
            assert pi2["previous_parse_offset"] > 13

            assert pi1["next_parse_offset"] == pi2["previous_parse_offset"]
Example #21
def interlace_mode_and_pixel_aspect_ratio(codec_features):
    """
    **Tests that the interlacing mode and pixel aspect ratio are correctly
    decoded.**

    These test cases require that the decoded pictures be viewed on the
    decoder's intended display equipment to confirm that the relevant display
    metadata is passed on.

    ``interlace_mode_and_pixel_aspect_ratio[static_sequence]``
        A single frame containing a stationary graphic at the top-left corner
        on a black background, as illustrated below.

        .. image:: /_static/user_guide/interlace_mode_and_pixel_aspect_ratio_static_sequence.svg

        If the field ordering (i.e. top field first flag, see (7.3) and (11.3))
        has been decoded correctly, the edges should be smooth. If the field
        order has been reversed the edges will appear jagged.

        If the pixel aspect ratio (see (11.4.7)) has been correctly decoded,
        the white triangle should be as wide as it is tall and the 'hole'
        should be circular.

    ``interlace_mode_and_pixel_aspect_ratio[moving_sequence]``
        A sequence of 10 frames containing a graphic moving from left to right
        along the top of the frame. In each successive frame, the graphic moves
        16 luma samples to the right (i.e. 8 samples every field, for
        interlaced formats).

        .. image:: /_static/user_guide/interlace_mode_and_pixel_aspect_ratio_moving_sequence.svg

        For progressive formats, the graphic should appear with smooth edges in
        each frame.

        For interlaced formats, the graphic should move smoothly when displayed
        on an interlaced monitor. If displayed as progressive frames (as in the
        illustration above), the pictures will appear to have ragged edges.
    """
    yield TestCase(
        Stream(sequences=[
            make_sequence(
                codec_features,
                static_sprite(
                    codec_features["video_parameters"],
                    codec_features["picture_coding_mode"],
                ),
            )
        ]),
        "static_sequence",
    )

    yield TestCase(
        Stream(sequences=[
            make_sequence(
                codec_features,
                moving_sprite(
                    codec_features["video_parameters"],
                    codec_features["picture_coding_mode"],
                ),
            )
        ]),
        "moving_sequence",
    )
Example #22
class TestAutofillParseOffsets(object):
    @pytest.mark.parametrize(
        "stream", [Stream(), Stream(sequences=[Sequence(data_units=[])])]
    )
    def test_doesnt_crash_on_empty_stream(self, stream):
        stream_orig = deepcopy(stream)
        assert autofill_parse_offsets(stream) == ([], [])
        assert stream == stream_orig

    @pytest.mark.parametrize(
        "parse_code",
        [tables.ParseCodes.padding_data, tables.ParseCodes.auxiliary_data],
    )
    def test_padding_and_aux_data(self, parse_code):
        stream = Stream(
            sequences=[
                Sequence(
                    data_units=[
                        # Next parse offset not given (should be treated as auto)
                        DataUnit(parse_info=ParseInfo(parse_code=parse_code)),
                        # Next parse offset is explicitly AUTO
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=parse_code,
                                next_parse_offset=AUTO,
                            ),
                        ),
                        # Next parse offset is given (should not be modified)
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=parse_code,
                                next_parse_offset=100,
                            ),
                        ),
                    ]
                )
            ]
        )
        for seq in stream["sequences"]:
            for data_unit in seq["data_units"]:
                if parse_code == tables.ParseCodes.padding_data:
                    data_unit["padding"] = Padding(bytes=b"1234")
                elif parse_code == tables.ParseCodes.auxiliary_data:
                    data_unit["auxiliary_data"] = AuxiliaryData(bytes=b"1234")

        assert autofill_parse_offsets(stream) == ([], [(0, 0), (0, 1), (0, 2)])

        next_parse_offsets = [
            data_unit["parse_info"]["next_parse_offset"]
            for seq in stream["sequences"]
            for data_unit in seq["data_units"]
        ]
        assert next_parse_offsets == [13 + 4, 13 + 4, 100]

        previous_parse_offsets = [
            data_unit["parse_info"]["previous_parse_offset"]
            for seq in stream["sequences"]
            for data_unit in seq["data_units"]
        ]
        assert previous_parse_offsets == [0, 0, 0]

    @pytest.mark.parametrize(
        "parse_code",
        [tables.ParseCodes.padding_data, tables.ParseCodes.auxiliary_data],
    )
    def test_padding_and_aux_data_default_data(self, parse_code):
        # No data given (default (empty) data should be assumed)
        stream = Stream(
            sequences=[
                Sequence(
                    data_units=[DataUnit(parse_info=ParseInfo(parse_code=parse_code))]
                )
            ]
        )

        assert autofill_parse_offsets(stream) == ([], [(0, 0)])

        parse_info = stream["sequences"][0]["data_units"][0]["parse_info"]
        assert parse_info["next_parse_offset"] == 13
        assert parse_info["previous_parse_offset"] == 0

    @pytest.mark.parametrize("explicit_auto", [True, False])
    @pytest.mark.parametrize(
        "parse_code",
        [
            parse_code
            for parse_code in tables.ParseCodes
            if parse_code
            not in (
                tables.ParseCodes.padding_data,
                tables.ParseCodes.auxiliary_data,
            )
        ],
    )
    def test_values_to_be_set_later_are_set_to_zero(self, parse_code, explicit_auto):
        stream = Stream(
            sequences=[
                Sequence(
                    data_units=[
                        # An automatically set data unit
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=parse_code,
                            )
                        ),
                        # One which is explicitly set (and should not be overridden)
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=parse_code,
                                next_parse_offset=100,
                                previous_parse_offset=200,
                            )
                        ),
                    ]
                )
            ]
        )
        if explicit_auto:
            parse_info = stream["sequences"][0]["data_units"][0]["parse_info"]
            parse_info["next_parse_offset"] = AUTO
            parse_info["previous_parse_offset"] = AUTO

        assert autofill_parse_offsets(stream) == ([(0, 0)], [(0, 0)])

        parse_info_0 = stream["sequences"][0]["data_units"][0]["parse_info"]
        assert parse_info_0["next_parse_offset"] == 0
        assert parse_info_0["previous_parse_offset"] == 0

        parse_info_1 = stream["sequences"][0]["data_units"][1]["parse_info"]
        assert parse_info_1["next_parse_offset"] == 100
        assert parse_info_1["previous_parse_offset"] == 200

    def test_finalizer_works(self):
        f = BytesIO()
        w = BitstreamWriter(f)

        # Sequence with every data unit type and fully automatic numbers
        stream = Stream(
            sequences=[
                Sequence(
                    data_units=[
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.sequence_header
                            ),
                            sequence_header=SequenceHeader(
                                parse_parameters=ParseParameters(major_version=3),
                                video_parameters=SourceParameters(
                                    # Tiny custom frame-size used to reduce test suite
                                    # runtime
                                    frame_size=FrameSize(
                                        custom_dimensions_flag=True,
                                        frame_width=4,
                                        frame_height=4,
                                    )
                                ),
                            ),
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.high_quality_picture
                            ),
                            picture_parse=PictureParse(
                                picture_header=PictureHeader(picture_number=0)
                            ),
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.low_delay_picture
                            ),
                            picture_parse=PictureParse(
                                picture_header=PictureHeader(picture_number=0)
                            ),
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.high_quality_picture_fragment
                            ),
                            fragment_parse=FragmentParse(
                                fragment_header=FragmentHeader(picture_number=0)
                            ),
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.high_quality_picture_fragment
                            ),
                            fragment_parse=FragmentParse(
                                fragment_header=FragmentHeader(picture_number=0)
                            ),
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.padding_data
                            ),
                            padding=Padding(bytes=b"123"),
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.auxiliary_data
                            ),
                            auxiliary_data=AuxiliaryData(bytes=b"123"),
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.end_of_sequence
                            ),
                        ),
                    ]
                )
            ]
        )

        (
            next_parse_offsets_to_autofill,
            previous_parse_offsets_to_autofill,
        ) = autofill_parse_offsets(stream)

        with Serialiser(w, stream, vc2_default_values_with_auto) as serdes:
            vc2.parse_stream(serdes, State())
        w.flush()

        offset_before = w.tell()
        autofill_parse_offsets_finalize(
            w,
            serdes.context,
            next_parse_offsets_to_autofill,
            previous_parse_offsets_to_autofill,
        )
        assert w.tell() == offset_before

        f.seek(0)
        r = BitstreamReader(f)
        with Deserialiser(r) as serdes:
            vc2.parse_stream(serdes, State())

        parse_infos = [
            data_unit["parse_info"]
            for sequence in serdes.context["sequences"]
            for data_unit in sequence["data_units"]
        ]

        # Check for start/end offsets being zero
        assert parse_infos[0]["previous_parse_offset"] == 0
        assert parse_infos[-1]["next_parse_offset"] == 0

        # Check for consistency and plausibility of offsets
        for pi1, pi2 in zip(parse_infos, parse_infos[1:]):
            assert pi1["next_parse_offset"] > 13
            assert pi2["previous_parse_offset"] > 13

            assert pi1["next_parse_offset"] == pi2["previous_parse_offset"]

    def test_works_on_multiple_sequences(self):
        f = BytesIO()
        w = BitstreamWriter(f)

        # Two sequences containing only padding and end-of-sequence data
        # units, with all parse offsets left to be filled in automatically
        stream = Stream(
            sequences=[
                Sequence(
                    data_units=[
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.padding_data
                            )
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.padding_data
                            )
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.end_of_sequence
                            )
                        ),
                    ]
                ),
                Sequence(
                    data_units=[
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.padding_data
                            )
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.padding_data
                            )
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.end_of_sequence
                            )
                        ),
                    ]
                ),
            ]
        )

        (
            next_parse_offsets_to_autofill,
            previous_parse_offsets_to_autofill,
        ) = autofill_parse_offsets(stream)

        print(stream)
        with Serialiser(w, stream, vc2_default_values_with_auto) as serdes:
            vc2.parse_stream(serdes, State())
        w.flush()

        autofill_parse_offsets_finalize(
            w,
            serdes.context,
            next_parse_offsets_to_autofill,
            previous_parse_offsets_to_autofill,
        )

        f.seek(0)
        r = BitstreamReader(f)
        with Deserialiser(r) as serdes:
            vc2.parse_stream(serdes, State())

        parse_infos = [
            [data_unit["parse_info"] for data_unit in sequence["data_units"]]
            for sequence in serdes.context["sequences"]
        ]

        # Check for start/end offsets being zero
        for sequence_pis in parse_infos:
            assert sequence_pis[0]["previous_parse_offset"] == 0
            assert sequence_pis[-1]["next_parse_offset"] == 0

            # Check for offset correctness
            for pi1, pi2 in zip(sequence_pis, sequence_pis[1:]):
                assert pi1["next_parse_offset"] == 13
                assert pi2["previous_parse_offset"] == 13
Example #23
    def test_removal_of_extended_transform_parameters(self):
        # NB: The stream specified below is actually compatible with version 2
        # so the extended transform parameters field should be removed if and
        # only if the major version was set to AUTO in the preceding sequence
        # header.
        stream = Stream(
            sequences=[
                Sequence(
                    data_units=[
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.sequence_header,
                            ),
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.high_quality_picture,
                            ),
                            picture_parse=PictureParse(
                                wavelet_transform=WaveletTransform(
                                    transform_parameters=TransformParameters(
                                        wavelet_index=tables.WaveletFilters.haar_no_shift,
                                        dwt_depth=2,
                                        extended_transform_parameters=ExtendedTransformParameters(
                                            asym_transform_index_flag=True,
                                            wavelet_index_ho=tables.WaveletFilters.haar_no_shift,
                                            asym_transform_flag=True,
                                            dwt_depth_ho=0,
                                        ),
                                    ),
                                ),
                            ),
                        ),
                    ]
                )
            ]
        )
        autofill_major_version(stream)
        assert stream == Stream(
            sequences=[
                Sequence(
                    data_units=[
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.sequence_header,
                            ),
                            sequence_header=SequenceHeader(
                                parse_parameters=ParseParameters(
                                    major_version=2,
                                ),
                            ),
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.high_quality_picture,
                            ),
                            picture_parse=PictureParse(
                                wavelet_transform=WaveletTransform(
                                    transform_parameters=TransformParameters(
                                        wavelet_index=tables.WaveletFilters.haar_no_shift,
                                        dwt_depth=2,
                                    ),
                                ),
                            ),
                        ),
                    ]
                )
            ]
        )
Example #24
def static_ramps(codec_features):
    """
    **Tests that the decoder correctly reports color encoding information.**

    This test requires that the decoded pictures be viewed on the decoder's
    intended display equipment to confirm that the relevant color coding
    metadata is passed on.

    A static frame containing linear signal ramps for white and primary
    red, green and blue (in that order, from top-to-bottom) as illustrated
    below:

    .. image:: /_static/user_guide/static_ramps.png

    The color bands must be in the correct order (white, red, green, blue from
    top to bottom). If not, the color components might have been ordered
    incorrectly.

    The red, green and blue colors should correspond to the red, green and blue
    primaries for the color specification (11.4.10.2).

    .. note::

        When D-Cinema primaries are specified (preset color primaries index 3),
        red, green and blue are replaced with CIE X, Y and Z respectively. Note
        that these might not represent physically realisable colors.

    The left-most pixels in each band are notionally video black and the
    right-most pixels video white, red, green and blue (respectively). That is,
    oversaturated signals (e.g. 'super-black' and 'super-white') are not
    included.

    .. note::

        For lossy codecs, the decoded signal values might vary due to coding
        artefacts.

    The value ramps in the test picture are linear, meaning that the (linear)
    pixel values increase at a constant rate from left (black) to right
    (saturated white/red/green/blue). Due to the non-linear response of human
    vision, this will produce a non-linear brightness ramp which appears to
    quickly saturate. Further, when a non-linear transfer function is specified
    (11.4.10.4) the raw decoded picture values will not be linearly spaced.

    .. note::

        When the D-Cinema transfer function is specified (preset transfer
        function index 3), the saturated signals do not correspond to a
        non-linear signal value of 1.0 but instead approximately 0.97. This is
        because the D-Cinema transfer function allocates part of its nominal
        output range to over-saturated signals.
    """
    return Stream(sequences=[
        make_sequence(
            codec_features,
            linear_ramps(
                codec_features["video_parameters"],
                codec_features["picture_coding_mode"],
            ),
        )
    ])
Example #25
    def test_version_selection(self, parameters, exp_version):
        profile = parameters.get("profile", tables.Profiles.low_delay)
        frame_rate_index = parameters.get("frame_rate_index")
        signal_range_index = parameters.get("signal_range_index")
        color_spec_index = parameters.get("color_spec_index", 0)
        color_primaries_index = parameters.get("color_primaries_index", 0)
        color_matrix_index = parameters.get("color_matrix_index", 0)
        transfer_function_index = parameters.get("transfer_function_index", 0)
        wavelet_index = parameters.get(
            "wavelet_index", tables.WaveletFilters.haar_no_shift
        )
        wavelet_index_ho = parameters.get("wavelet_index_ho")
        dwt_depth_ho = parameters.get("dwt_depth_ho", None)
        parse_code = parameters.get("parse_code", tables.ParseCodes.low_delay_picture)

        # Kept separate to allow later checking of the version chosen
        pp = ParseParameters(major_version=AUTO, profile=profile)

        # Repeated in the appropriate place for fragments and pictures
        tp = TransformParameters(
            wavelet_index=wavelet_index,
            dwt_depth=2,
            extended_transform_parameters=ExtendedTransformParameters(
                asym_transform_index_flag=wavelet_index_ho is not None,
                wavelet_index_ho=wavelet_index_ho,
                asym_transform_flag=dwt_depth_ho is not None,
                dwt_depth_ho=dwt_depth_ho,
            ),
        )

        stream = Stream(
            sequences=[
                Sequence(
                    data_units=[
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.sequence_header
                            ),
                            sequence_header=SequenceHeader(
                                parse_parameters=pp,
                                video_parameters=SourceParameters(
                                    frame_rate=FrameRate(
                                        custom_frame_rate_flag=frame_rate_index
                                        is not None,
                                        index=frame_rate_index,
                                    ),
                                    signal_range=SignalRange(
                                        custom_signal_range_flag=signal_range_index
                                        is not None,
                                        index=signal_range_index,
                                    ),
                                    color_spec=ColorSpec(
                                        custom_color_spec_flag=True,
                                        index=color_spec_index,
                                        color_primaries=ColorPrimaries(
                                            custom_color_primaries_flag=color_primaries_index
                                            is not None,
                                            index=color_primaries_index,
                                        ),
                                        color_matrix=ColorMatrix(
                                            custom_color_matrix_flag=color_matrix_index
                                            is not None,
                                            index=color_matrix_index,
                                        ),
                                        transfer_function=TransferFunction(
                                            custom_transfer_function_flag=transfer_function_index
                                            is not None,
                                            index=transfer_function_index,
                                        ),
                                    ),
                                ),
                            ),
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=parse_code,
                            ),
                            picture_parse=PictureParse(
                                wavelet_transform=WaveletTransform(
                                    transform_parameters=tp,
                                )
                            ),
                            fragment_parse=FragmentParse(
                                transform_parameters=tp,
                            ),
                        ),
                    ]
                )
            ]
        )
        autofill_major_version(stream)
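        # autofill_major_version should pick the lowest version compatible
        # with the coded features and only leave extended_transform_parameters
        # in place when version 3 is required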
        assert pp["major_version"] == exp_version
        if pp["major_version"] == 3:
            assert "extended_transform_parameters" in tp
        else:
            assert "extended_transform_parameters" not in tp
Example #26
    def test_works_on_multiple_sequences(self):
        f = BytesIO()
        w = BitstreamWriter(f)

        # Two sequences containing only padding and end-of-sequence data
        # units, with all parse offsets left to be filled in automatically
        stream = Stream(
            sequences=[
                Sequence(
                    data_units=[
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.padding_data
                            )
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.padding_data
                            )
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.end_of_sequence
                            )
                        ),
                    ]
                ),
                Sequence(
                    data_units=[
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.padding_data
                            )
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.padding_data
                            )
                        ),
                        DataUnit(
                            parse_info=ParseInfo(
                                parse_code=tables.ParseCodes.end_of_sequence
                            )
                        ),
                    ]
                ),
            ]
        )

        (
            next_parse_offsets_to_autofill,
            previous_parse_offsets_to_autofill,
        ) = autofill_parse_offsets(stream)

        print(stream)
        with Serialiser(w, stream, vc2_default_values_with_auto) as serdes:
            vc2.parse_stream(serdes, State())
        w.flush()

        autofill_parse_offsets_finalize(
            w,
            serdes.context,
            next_parse_offsets_to_autofill,
            previous_parse_offsets_to_autofill,
        )

        f.seek(0)
        r = BitstreamReader(f)
        with Deserialiser(r) as serdes:
            vc2.parse_stream(serdes, State())

        parse_infos = [
            [data_unit["parse_info"] for data_unit in sequence["data_units"]]
            for sequence in serdes.context["sequences"]
        ]

        # Check for start/end offsets being zero
        for sequence_pis in parse_infos:
            assert sequence_pis[0]["previous_parse_offset"] == 0
            assert sequence_pis[-1]["next_parse_offset"] == 0

            # Check offset correctness: an empty padding data unit occupies
            # exactly the 13 bytes of its parse_info header (10.5.1), so
            # adjacent offsets are both 13
            for pi1, pi2 in zip(sequence_pis, sequence_pis[1:]):
                assert pi1["next_parse_offset"] == 13
                assert pi2["previous_parse_offset"] == 13
Example #27
def test_autofill_and_serialise_stream():
    f = BytesIO()

    # Sequence with every data unit type and fully automatic numbers
    stream = Stream(
        sequences=[
            Sequence(
                data_units=[
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.sequence_header
                        ),
                        sequence_header=SequenceHeader(
                            video_parameters=SourceParameters(
                                # Tiny custom frame-size used to reduce test suite runtime
                                frame_size=FrameSize(
                                    custom_dimensions_flag=True,
                                    frame_width=4,
                                    frame_height=4,
                                )
                            ),
                        ),
                    ),
                    # Pictures
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.low_delay_picture
                        ),
                    ),
                    # High quality fragment
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=0)
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=1)
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=1)
                        ),
                    ),
                    # A second fragmented high quality picture
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=0)
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=1)
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=1)
                        ),
                    ),
                    # Other types
                    DataUnit(
                        parse_info=ParseInfo(parse_code=tables.ParseCodes.padding_data),
                        padding=Padding(bytes=b"123"),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.auxiliary_data
                        ),
                        auxiliary_data=AuxiliaryData(bytes=b"123"),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.end_of_sequence
                        ),
                    ),
                ]
            )
        ]
    )

    autofill_and_serialise_stream(f, stream)

    f.seek(0)
    r = BitstreamReader(f)
    with Deserialiser(r) as serdes:
        vc2.parse_stream(serdes, State())

    parse_infos = [
        data_unit["parse_info"]
        for sequence in serdes.context["sequences"]
        for data_unit in sequence["data_units"]
    ]

    # Check for start/end offsets being zero
    assert parse_infos[0]["previous_parse_offset"] == 0
    assert parse_infos[-1]["next_parse_offset"] == 0

    # Check offsets are consistent and plausible (every data unit here carries
    # a payload, so each offset exceeds the 13 byte parse_info header)
    for pi1, pi2 in zip(parse_infos, parse_infos[1:]):
        assert pi1["next_parse_offset"] > 13
        assert pi2["previous_parse_offset"] > 13

        assert pi1["next_parse_offset"] == pi2["previous_parse_offset"]

    # Check picture numbers: pictures and fragmented pictures are numbered 0-3
    # in stream order, fragments of the same picture share a number, and
    # non-picture data units have no picture number
    picture_numbers = [
        (
            data_unit.get("picture_parse", {}).get("picture_header", {})
            or data_unit.get("fragment_parse", {}).get("fragment_header", {})
        ).get("picture_number")
        for sequence in serdes.context["sequences"]
        for data_unit in sequence["data_units"]
    ]
    assert picture_numbers == [
        None,
        0,
        1,
        2,
        2,
        2,
        3,
        3,
        3,
        None,
        None,
        None,
    ]

    # Check major version is autofilled with 3 (due to presence of fragments)
    major_versions = [
        data_unit["sequence_header"]["parse_parameters"]["major_version"]
        for sequence in serdes.context["sequences"]
        for data_unit in sequence["data_units"]
        if data_unit["parse_info"]["parse_code"] == tables.ParseCodes.sequence_header
    ]
    assert all(v == 3 for v in major_versions)
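
# The offset assertions above (and the "== 13" checks in the previous example)
# rely on the fixed size of the parse_info header: a 32-bit prefix, an 8-bit
# parse code and two 32-bit offsets, i.e. 13 bytes (10.5.1). A minimal sanity
# check of that arithmetic (this constant is illustrative, not taken from the
# library):
PARSE_INFO_HEADER_BYTES = (32 + 8 + 32 + 32) // 8
assert PARSE_INFO_HEADER_BYTES == 13
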
Example #28
    def test_dont_change_non_auto_picture_numbers(self, seq):
        # Shouldn't crash or make any changes
        stream = Stream(sequences=[seq])
        stream_orig = deepcopy(stream)
        autofill_picture_number(stream)
        assert stream == stream_orig
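
# The ``seq`` argument above comes from a parametrisation that is not shown in
# this listing. A hypothetical value it might take is sketched below: a
# fragment whose picture number is given explicitly rather than AUTO, which
# autofill_picture_number must leave untouched (Example #30 below makes the
# same point for explicit numbers mixed in with AUTO ones).
example_seq = Sequence(
    data_units=[
        DataUnit(
            parse_info=ParseInfo(
                parse_code=tables.ParseCodes.high_quality_picture_fragment
            ),
            fragment_parse=FragmentParse(
                fragment_header=FragmentHeader(
                    picture_number=1234,  # explicit, not AUTO
                    fragment_slice_count=0,
                )
            ),
        ),
    ]
)
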
Example #29
def source_parameters_encodings(codec_features):
    """
    **Tests the decoder can decode different encodings of the video format
    metadata.**

    The test cases in this series each contain the same source parameters
    (11.4), but encode them in different ways.

    ``source_parameters_encodings[custom_flags_combination_?_base_video_format_?]``
        For these test cases, the base video format which most closely matches
        the desired video format is used. Each test case incrementally checks
        that source parameters can be explicitly set to their desired values
        (e.g. by setting ``custom_*_flag`` bits to 1).

    ``source_parameters_encodings[base_video_format_?]``
        These test cases check that other base video formats can be used (and
        overridden) to specify the desired video format. Each of these test
        cases will explicitly specify as few video parameters as possible (e.g.
        setting as many ``custom_*_flag`` fields to 0 as possible).

    .. tip::

        The :ref:`vc2-bitstream-viewer` can be used to display the encoding
        used in a given test case as follows::

            $ vc2-bitstream-viewer --show sequence_header path/to/test_case.vc2

    .. note::

        Some VC-2 levels constrain the allowed encoding of source parameters in
        the bit stream, and so fewer test cases will be produced for those
        levels.

    .. note::

        Not all base video formats can be used as the basis for encoding a
        specific video format. For example, the 'top field first' flag (11.3)
        set by a base video format cannot be overridden. As a result, test
        cases will not include every base video format index.

    """
    # Generate a base sequence in which we'll replace the sequence headers
    # later
    base_sequence = make_sequence(
        codec_features,
        static_sprite(
            codec_features["video_parameters"],
            codec_features["picture_coding_mode"],
        ),
    )

    # To keep the number of tests sensible, we'll include all sequence header
    # encodings using the best-matching base video format followed by the
    # least-custom-overridden encoding for all other base video formats. This
    # exercises as many 'custom' flags as possible (against the best-matching
    # base video format) and also checks, as far as possible, that the other
    # base video format values are correct.
    best_base_video_format = None
    last_base_video_format = None
    for i, sequence_header in enumerate(iter_sequence_headers(codec_features)):
        base_video_format = sequence_header["base_video_format"]

        # The iter_sequence_headers function returns headers with the best
        # matching base video format first
        if best_base_video_format is None:
            best_base_video_format = base_video_format

        # The iter_source_parameter_options function produces sequence headers
        # with base video formats grouped consecutively. The first example of each
        # will use the fewest possible 'custom' flags and therefore best tests
        # that the base video format parameters are correct in the decoder.
        first_example_of_base_video_format = (
            base_video_format != last_base_video_format
        )
        last_base_video_format = base_video_format

        if base_video_format == best_base_video_format:
            yield TestCase(
                Stream(
                    sequences=[replace_sequence_headers(base_sequence, sequence_header)]
                ),
                "custom_flags_combination_{}_base_video_format_{:d}".format(
                    i + 1,
                    base_video_format,
                ),
            )
        elif first_example_of_base_video_format:
            yield TestCase(
                Stream(
                    sequences=[replace_sequence_headers(base_sequence, sequence_header)]
                ),
                "base_video_format_{:d}".format(base_video_format),
            )
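
# A hypothetical driver for the generator above, sketched for illustration
# only: ``my_codec_features`` is a placeholder, and the ``.value`` and
# ``.subcase_name`` attribute names on TestCase are assumptions rather than
# something shown in this listing.
def write_source_parameter_test_cases(my_codec_features):
    for test_case in source_parameters_encodings(my_codec_features):
        # Assumed attributes: .subcase_name (the bracketed label described in
        # the docstring above) and .value (the Stream to serialise).
        filename = "source_parameters_encodings[{}].vc2".format(
            test_case.subcase_name
        )
        with open(filename, "wb") as f:
            autofill_and_serialise_stream(f, test_case.value)
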
Example #30
    def test_fragments(self, parse_code):
        stream = Stream(
            sequences=[
                Sequence(
                    data_units=[
                        # The first fragment in the sequence should be auto-numbered
                        # to the expected starting picture number
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            fragment_parse=FragmentParse(
                                fragment_header=FragmentHeader(
                                    picture_number=AUTO,
                                    fragment_slice_count=0,
                                )
                            ),
                        ),
                        # If not the first fragment in the picture, the picture number
                        # should not be incremented
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            fragment_parse=FragmentParse(
                                fragment_header=FragmentHeader(
                                    picture_number=AUTO,
                                    fragment_slice_count=1,
                                )
                            ),
                        ),
                        # If picture number not mentioned, it should still be autofilled
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            fragment_parse=FragmentParse(
                                fragment_header=FragmentHeader(
                                    fragment_slice_count=1,
                                )
                            ),
                        ),
                        # Should auto increment on new picture started
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            fragment_parse=FragmentParse(
                                fragment_header=FragmentHeader(
                                    picture_number=AUTO,
                                    fragment_slice_count=0,
                                )
                            ),
                        ),
                        # If an explicit picture number is given, it should be used
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            fragment_parse=FragmentParse(
                                fragment_header=FragmentHeader(
                                    picture_number=4321,
                                    fragment_slice_count=0,
                                )
                            ),
                        ),
                        # ...even if that changes the picture number mid picture
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            fragment_parse=FragmentParse(
                                fragment_header=FragmentHeader(
                                    picture_number=0xFFFFFFFE,
                                    fragment_slice_count=1,
                                )
                            ),
                        ),
                        # Should continue on from last explicit number
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            fragment_parse=FragmentParse(
                                fragment_header=FragmentHeader(
                                    picture_number=AUTO,
                                    fragment_slice_count=0,
                                )
                            ),
                        ),
                        # Should wrap around to zero
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            fragment_parse=FragmentParse(
                                fragment_header=FragmentHeader(
                                    picture_number=AUTO,
                                    fragment_slice_count=0,
                                )
                            ),
                        ),
                    ]
                )
            ]
        )

        autofill_picture_number(stream, 1234)

        picture_numbers = [
            data_unit["fragment_parse"]["fragment_header"]["picture_number"]
            for seq in stream["sequences"]
            for data_unit in seq["data_units"]
        ]
        assert picture_numbers == [
            1234,
            1234,
            1234,
            1235,
            4321,
            0xFFFFFFFE,
            0xFFFFFFFF,
            0x0,
        ]
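
# The final two expected values rely on picture numbers being 32-bit: one past
# 0xFFFFFFFF wraps to 0. A minimal sketch of that rule (not the library's own
# helper, just the arithmetic the assertions above encode):
def next_picture_number(picture_number):
    # Picture numbers occupy 32 bits, so incrementing wraps modulo 2**32.
    return (picture_number + 1) & 0xFFFFFFFF


assert next_picture_number(0xFFFFFFFF) == 0x0
assert next_picture_number(1234) == 1235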