Example #1
0
    def test_qindex_matters(self):
        """Decoding must change when every slice's qindex is forced to zero."""
        codec_features = deepcopy(MINIMAL_CODEC_FEATURES)
        codec_features["lossless"] = True
        codec_features["picture_bytes"] = None

        # Sanity check: decode the stream twice -- once as generated and once
        # with every slice's qindex clamped to zero. If the generated pictures
        # really depend on quantization, the two decodes must differ.
        decoded = {False: [], True: []}
        for clamp_qindex in (False, True):
            stream = lossless_quantization(codec_features)

            if clamp_qindex:
                for _state, _sx, _sy, hq_slice in iter_slices_in_sequence(
                    codec_features,
                    stream["sequences"][0],
                ):
                    hq_slice["qindex"] = 0

            # Serialise the (possibly modified) stream into memory
            buffer = BytesIO()
            autofill_and_serialise_stream(buffer, stream)
            buffer.seek(0)

            # Decode, collecting every output picture
            def collect_picture(picture, video_parameters, picture_coding_mode):
                decoded[clamp_qindex].append(picture)

            state = State(_output_picture_callback=collect_picture)
            init_io(state, buffer)
            parse_stream(state)

        # The clamped decode must produce different pictures, proving that
        # qindex genuinely affects the decoded output.
        assert decoded[False] != decoded[True]
Example #2
0
    def test_slice_size_scaler(
        self, fragment_slice_count, num_coeffs_per_slice, exp_slice_size_scaler
    ):
        """The auto-chosen slice_size_scaler must match the expected value."""
        # A null transform over a 1-pixel-high image with a single slice means
        # exactly 'frame_width' values are encoded in that slice (each value is
        # '1', i.e. 4 bits each).
        codec_features = deepcopy(MINIMAL_CODEC_FEATURES)
        codec_features["lossless"] = True
        codec_features["picture_bytes"] = None
        video_parameters = codec_features["video_parameters"]
        video_parameters["frame_width"] = num_coeffs_per_slice
        video_parameters["frame_height"] = 1
        video_parameters["clean_width"] = num_coeffs_per_slice
        video_parameters["clean_height"] = 1
        codec_features["dwt_depth"] = 0
        codec_features["dwt_depth_ho"] = 0
        codec_features["slices_x"] = 1
        codec_features["slices_y"] = 1

        stream = lossless_quantization(codec_features)

        # Every transform parameters header must carry the expected scaler
        for transform_parameters in iter_transform_parameters_in_sequence(
            codec_features, stream["sequences"][0]
        ):
            actual = transform_parameters["slice_parameters"]["slice_size_scaler"]
            assert actual == exp_slice_size_scaler

        # Sanity check: serialisation fails if the lengths/slice size scaler
        # are insufficient, so a clean run here confirms they are adequate.
        autofill_and_serialise_stream(BytesIO(), stream)
Example #3
0
def output_decoder_test_case(output_dir, codec_features, test_case):
    """
    Write a decoder test case to disk.

    Writes the serialised bitstream to ``<name>.vc2``, the decoded model
    answer pictures to ``<name>_expected/picture_<n>.raw`` and, when present,
    the test case metadata to ``<name>_metadata.json``.

    Parameters
    ==========
    output_dir : str
        Output directory to write test cases to.
    codec_features : :py:class:`~vc2_conformance.codec_features.CodecFeatures`
    test_case : :py:class:`~vc2_conformance.test_cases.TestCase`
    """
    # Serialise the bitstream
    bitstream_filename = os.path.join(output_dir, "{}.vc2".format(test_case.name))
    with open(bitstream_filename, "wb") as f:
        autofill_and_serialise_stream(f, test_case.value)

    # Decode the bitstream again to produce the model answer pictures
    model_answer_directory = os.path.join(
        output_dir, "{}_expected".format(test_case.name)
    )
    makedirs(model_answer_directory, exist_ok=True)
    with open(bitstream_filename, "rb") as f:
        # Mutable cell so the callback below can number pictures sequentially
        picture_index = [0]

        def output_picture(picture, video_parameters, picture_coding_mode):
            filename = os.path.join(
                model_answer_directory,
                "picture_{}.raw".format(picture_index[0]),
            )
            file_format.write(picture, video_parameters, picture_coding_mode, filename)
            picture_index[0] += 1

        state = State(_output_picture_callback=output_picture)
        init_io(state, f)
        parse_stream(state)

    # Write metadata, if any
    if test_case.metadata is not None:
        metadata_filename = os.path.join(
            output_dir,
            "{}_metadata.json".format(test_case.name),
        )
        with open(metadata_filename, "w") as f:
            json.dump(test_case.metadata, f)

    logging.info(
        "Generated decoder test case %s for %s",
        test_case.name,
        codec_features["name"],
    )
Example #4
0
def test_next_parse_offset_is_zero(profile, fragment_slice_count):
    """After serialisation only the leading sequence header keeps an offset."""
    # Generate a stream with the next_parse_offset fields left absent
    codec_features = dict(
        MINIMAL_CODEC_FEATURES,
        profile=profile,
        fragment_slice_count=fragment_slice_count,
    )
    stream = absent_next_parse_offset(codec_features)

    # Round-trip: serialise then deserialise
    f = BytesIO()
    autofill_and_serialise_stream(f, stream)
    f.seek(0)
    with Deserialiser(BitstreamReader(f)) as des:
        parse_stream(des, State())
    decoded_stream = des.context

    # Record, in stream order, whether each data unit's next_parse_offset
    # came out as zero. Only the sequence header at the start should have a
    # non-zero next_parse_offset defined.
    zero_flags = [
        data_unit["parse_info"]["next_parse_offset"] == 0
        for seq in decoded_stream["sequences"]
        for data_unit in seq["data_units"]
    ]
    first, rest = zero_flags[0], zero_flags[1:]
    assert not first
    assert all(rest)
def test_all_decoder_test_cases(codec_features, test_case):
    """Every decoder test case must yield a valid, correctly-formatted stream."""
    # Every test case for every basic video mode must produce a valid
    # bitstream containing pictures with the correct format, and its JSON
    # metadata must be serialisable.
    assert isinstance(test_case.value, Stream)
    json.dumps(test_case.metadata)  # mustn't crash

    # Serialise into memory
    f = BytesIO()
    autofill_and_serialise_stream(f, test_case.value)
    f.seek(0)

    # Deserialise/validate, checking every decoded picture matches the
    # requested video parameters and picture coding mode.
    def check_picture_format(picture, video_parameters, picture_coding_mode):
        assert video_parameters == codec_features["video_parameters"]
        assert picture_coding_mode == codec_features["picture_coding_mode"]

    state = State(_output_picture_callback=check_picture_format)

    with alternative_level_1():
        init_io(state, f)
        parse_stream(state)
def test_slice_padding_data(profile, lossless):
    """The expected padding data for each picture component reaches the stream."""
    codec_features = MINIMAL_CODEC_FEATURES.copy()
    codec_features["profile"] = profile
    codec_features["lossless"] = lossless
    if lossless:
        codec_features["picture_bytes"] = None

    # First 16 bits of padding seen for each component, keyed by the
    # '*_block_padding' field name: {padding_field_name: set([bits, ...])}
    component_padding_first_16_bits = defaultdict(set)

    # Number of byte-aligned "BBCD" end-of-sequence markers per test case
    end_of_sequence_counts = []

    def find_slice_padding_bytes(value):
        # Recursively walk the stream structure, recording the first 16 bits
        # of every '*_block_padding' bitarray encountered.
        if isinstance(value, dict):
            for key, child in value.items():
                if key.endswith("_block_padding"):
                    component_padding_first_16_bits[key].add(child[:16].to01())
                else:
                    find_slice_padding_bytes(child)
        elif isinstance(value, list):
            for child in value:
                find_slice_padding_bytes(child)

    # Marker for a byte-aligned end-of-sequence parse info (constant, hoisted)
    end_of_sequence = b"BBCD" + bytearray([ParseCodes.end_of_sequence])
    for test_case in slice_padding_data(codec_features):
        find_slice_padding_bytes(test_case.value)

        f = BytesIO()
        autofill_and_serialise_stream(f, test_case.value)
        end_of_sequence_counts.append(f.getvalue().count(end_of_sequence))

    if profile == Profiles.high_quality:
        components = ["Y", "C1", "C2"]
    elif profile == Profiles.low_delay:
        components = ["Y", "C"]

    # Each component must have seen all four (not necessarily byte aligned)
    # padding patterns: zeros, ones and both alternating phases.
    for component in components:
        key = "{}_block_padding".format(component.lower())
        seen = component_padding_first_16_bits[key]
        assert "0000000000000000" in seen
        assert "1111111111111111" in seen
        assert "1010101010101010" in seen
        assert "0101010101010101" in seen

    # The final len(components) test cases insert extra (byte aligned!)
    # end-of-sequence blocks in their padding data (NB: we don't test that
    # they appear in the right places... but hey...)
    num_special = len(components)
    for count in end_of_sequence_counts[:-num_special]:
        assert count == 1
    for count in end_of_sequence_counts[-num_special:]:
        assert count > 1
def encode_and_decode(stream):
    """Serialise a stream and decode it again, returning the decoded pictures."""
    buffer = BytesIO()
    autofill_and_serialise_stream(buffer, stream)
    buffer.seek(0)

    decoded_pictures = []

    def capture(picture, video_parameters, picture_coding_mode):
        decoded_pictures.append(picture)

    state = State(_output_picture_callback=capture)
    init_io(state, buffer)
    parse_stream(state)

    return decoded_pictures
    def serialise_and_decode_pictures(self, stream):
        """Serialise a stream, decode it and return the decoded pictures."""
        buffer = BytesIO()
        autofill_and_serialise_stream(buffer, stream)
        buffer.seek(0)

        decoded = []

        def capture(picture, video_parameters, picture_coding_mode):
            decoded.append(picture)

        state = State(_output_picture_callback=capture)
        decoder.init_io(state, buffer)
        decoder.parse_stream(state)

        return decoded
Example #9
0
def check_codec_features_valid(codec_feature_sets):
    """
    Verify that the codec features requested don't themselves violate the spec
    (e.g. violate a level constraint). This is done by generating then
    validating a bitstream containing a single mid-gray frame.

    Prints an error to stderr and calls :py:func:`sys.exit` if a problem is
    encountered.
    """

    def report_invalid_and_exit(name, explanation):
        # Print a terminal-width-wrapped explanation to stderr and exit with
        # status 4 (shared by both failure paths below).
        sys.stderr.write(
            "Error: Codec configuration {!r} is invalid:\n".format(name)
        )
        terminal_width = get_terminal_size()[0]
        sys.stderr.write(wrap_paragraphs(explanation, terminal_width))
        sys.stderr.write("\n")
        sys.exit(4)

    logging.info("Checking codec feature sets are valid...")
    for name, codec_features in codec_feature_sets.items():
        logging.info("Checking %r...", name)
        f = BytesIO()

        # Sanity-check the color format (since this won't raise a validation
        # error but will result in a useless gamut being available).
        sanity = sanity_check_video_parameters(codec_features["video_parameters"])
        if not sanity:
            # NB: warning only -- a dubious color spec is not a hard failure.
            # (Fixed: message previously read "%r be malformed".)
            logging.warning(
                "Color specification for codec configuration %r may be malformed: %s",
                name,
                sanity.explain(),
            )

        # Generate a minimal bitstream
        try:
            autofill_and_serialise_stream(f, static_gray(codec_features))
        except UnsatisfiableCodecFeaturesError as e:
            report_invalid_and_exit(name, e.explain())
        f.seek(0)

        # Validate it meets the spec
        state = State()
        init_io(state, f)
        try:
            parse_stream(state)
        except ConformanceError as e:
            report_invalid_and_exit(name, e.explain())
Example #10
0
    def test_length_unchanged_for_non_lossless(
        self, fragment_slice_count, picture_bytes
    ):
        """Prefix-bytes test cases must not change slice data unit lengths."""
        codec_features = CodecFeatures(
            MINIMAL_CODEC_FEATURES,
            profile=Profiles.high_quality,
            picture_bytes=picture_bytes,
            fragment_slice_count=fragment_slice_count,
        )

        # Measure the data unit lengths of a reference sequence containing no
        # prefix bytes.
        reference = BytesIO()
        sequence = make_sequence(
            codec_features,
            mid_gray(
                codec_features["video_parameters"],
                codec_features["picture_coding_mode"],
            ),
        )
        autofill_and_serialise_stream(reference, Stream(sequences=[sequence]))
        expected_data_unit_lengths = deserialise_and_measure_slice_data_unit_sizes(
            reference.getvalue()
        )
        # Sanity check that the measurement helper is actually working...
        assert len(expected_data_unit_lengths) >= 1

        test_cases = list(slice_prefix_bytes(codec_features))
        assert len(test_cases) == 3

        # Every prefix-bytes test case must leave the lengths intact
        for test_case in test_cases:
            serialised = BytesIO()
            autofill_and_serialise_stream(serialised, test_case.value)
            data_unit_lengths = deserialise_and_measure_slice_data_unit_sizes(
                serialised.getvalue()
            )
            assert data_unit_lengths == expected_data_unit_lengths
Example #11
0
def serialize_and_decode(sequence):
    """Serialise a sequence, decode it and return the decoded pictures."""
    # Serialise the sequence into an in-memory bitstream
    buffer = BytesIO()
    autofill_and_serialise_stream(buffer, Stream(sequences=[sequence]))
    buffer.seek(0)

    # Feed to the conformance checking decoder, capturing every picture
    decoded_pictures = []

    def capture_picture(picture, video_parameters, picture_coding_mode):
        decoded_pictures.append(picture)

    state = State(_output_picture_callback=capture_picture)
    init_io(state, buffer)
    parse_stream(state)

    return decoded_pictures
Example #12
0
def check_for_signal_clipping(sequence):
    """
    Given a :py:class:`vc2_conformance.bitstream.Sequence`, return True if any
    picture component signal was clipped during decoding.
    """
    # NB: Internally we just check for saturated signal levels. This way we
    # avoid the need to modify the decoder to remove the clipper and all that
    # faff...

    # Serialise. Deepcopy first: autofill_and_serialise_stream mutates the
    # stream it is given.
    f = BytesIO()
    autofill_and_serialise_stream(f, Stream(sequences=[deepcopy(sequence)]))
    f.seek(0)

    # Decode, flagging any pixel sitting at the extreme of its bit depth
    state = State()
    may_have_clipped = [False]  # mutable cell written by the callback below

    def output_picture_callback(picture, video_parameters, picture_coding_mode):
        for component, depth in [
            ("Y", state["luma_depth"]),
            ("C1", state["color_diff_depth"]),
            ("C2", state["color_diff_depth"]),
        ]:
            values = [value for row in picture[component] for value in row]
            if min(values) == 0 or max(values) == (1 << depth) - 1:
                may_have_clipped[0] = True

    state["_output_picture_callback"] = output_picture_callback
    init_io(state, f)
    parse_stream(state)

    return may_have_clipped[0]
Example #13
0
    def test_data_is_as_expected(self, fragment_slice_count, lossless):
        """Each slice-prefix-bytes subcase fills the prefix as advertised."""
        codec_features = CodecFeatures(
            MINIMAL_CODEC_FEATURES,
            profile=Profiles.high_quality,
            picture_bytes=None if lossless else 64,
            lossless=lossless,
            fragment_slice_count=fragment_slice_count,
        )

        test_cases = list(slice_prefix_bytes(codec_features))
        assert len(test_cases) == 3

        for test_case in test_cases:
            f = BytesIO()
            autofill_and_serialise_stream(f, test_case.value)
            f.seek(0)

            # Capture every 'prefix_bytes' value as it is deserialised
            captured_prefixes = []

            def record_prefix_bytes(_des, target, value):
                if target == "prefix_bytes":
                    captured_prefixes.append(value)

            with MonitoredDeserialiser(record_prefix_bytes, BitstreamReader(f)) as des:
                parse_stream(des, State())
            # Sanity check for test
            assert len(captured_prefixes) > 0

            # Each subcase uses a distinctive fill pattern for its prefixes
            for prefix_bytes in captured_prefixes:
                subcase = test_case.subcase_name
                if subcase == "zeros":
                    assert all(b == 0 for b in bytearray(prefix_bytes))
                elif subcase == "ones":
                    assert all(b == 0xFF for b in bytearray(prefix_bytes))
                elif subcase == "end_of_sequence":
                    assert b"BBCD" in prefix_bytes
                else:
                    assert False
def test_autofill_and_serialise_stream():
    """End-to-end check of autofill_and_serialise_stream.

    Builds a sequence containing every data unit type with all auto-fillable
    fields (parse offsets, picture numbers, major version) left unset,
    serialises it, deserialises the result and checks the autofilled values.
    """
    f = BytesIO()

    # Sequence with every data unit type and fully automatic numbers
    stream = Stream(
        sequences=[
            Sequence(
                data_units=[
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.sequence_header
                        ),
                        sequence_header=SequenceHeader(
                            video_parameters=SourceParameters(
                                # Tiny custom frame-size used to reduce test suite runtime
                                frame_size=FrameSize(
                                    custom_dimensions_flag=True,
                                    frame_width=4,
                                    frame_height=4,
                                )
                            ),
                        ),
                    ),
                    # Pictures (one HQ, one LD; picture numbers autofilled 0, 1)
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.low_delay_picture
                        ),
                    ),
                    # High quality fragment: a header (fragment_slice_count=0)
                    # followed by two slice-carrying fragments; all three share
                    # one autofilled picture number.
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=0)
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=1)
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=1)
                        ),
                    ),
                    # Second fragmented picture. NOTE(review): this group was
                    # originally labelled "Low delay fragment" but the parse
                    # codes below are high_quality_picture_fragment -- confirm
                    # whether low_delay_picture_fragment was intended.
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=0)
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=1)
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=1)
                        ),
                    ),
                    # Other types (no picture numbers expected for these)
                    DataUnit(
                        parse_info=ParseInfo(parse_code=tables.ParseCodes.padding_data),
                        padding=Padding(bytes=b"123"),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.auxiliary_data
                        ),
                        auxiliary_data=AuxiliaryData(bytes=b"123"),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.end_of_sequence
                        ),
                    ),
                ]
            )
        ]
    )

    autofill_and_serialise_stream(f, stream)

    # Deserialise the result to inspect what was autofilled
    f.seek(0)
    r = BitstreamReader(f)
    with Deserialiser(r) as serdes:
        vc2.parse_stream(serdes, State())

    parse_infos = [
        data_unit["parse_info"]
        for sequence in serdes.context["sequences"]
        for data_unit in sequence["data_units"]
    ]

    # Check for start/end offsets being zero
    assert parse_infos[0]["previous_parse_offset"] == 0
    assert parse_infos[-1]["next_parse_offset"] == 0

    # Check for consistency and plausibility of offsets
    # (13 is the byte size of a parse_info block, so every offset between
    # adjacent parse_infos must exceed it)
    for pi1, pi2 in zip(parse_infos, parse_infos[1:]):
        assert pi1["next_parse_offset"] > 13
        assert pi2["previous_parse_offset"] > 13

        assert pi1["next_parse_offset"] == pi2["previous_parse_offset"]

    # Check picture numbers: None for non-picture data units, a fresh number
    # per picture, and one shared number for each 3-fragment group.
    picture_numbers = [
        (
            data_unit.get("picture_parse", {}).get("picture_header", {})
            or data_unit.get("fragment_parse", {}).get("fragment_header", {})
        ).get("picture_number")
        for sequence in serdes.context["sequences"]
        for data_unit in sequence["data_units"]
    ]
    assert picture_numbers == [
        None,
        0,
        1,
        2,
        2,
        2,
        3,
        3,
        3,
        None,
        None,
        None,
    ]

    # Check major version is autofilled with 3 (due to presence of fragments)
    major_versions = [
        data_unit["sequence_header"]["parse_parameters"]["major_version"]
        for sequence in serdes.context["sequences"]
        for data_unit in sequence["data_units"]
        if data_unit["parse_info"]["parse_code"] == tables.ParseCodes.sequence_header
    ]
    assert all(v == 3 for v in major_versions)