def test_replace_padding_data():
    """Padding bytes are replaced in a copy; the original sequence is untouched."""
    orig_seq = Sequence(
        data_units=[
            DataUnit(
                parse_info=ParseInfo(parse_code=ParseCodes.padding_data),
                padding=Padding(bytes=b"foo"),
            ),
            DataUnit(
                parse_info=ParseInfo(parse_code=ParseCodes.padding_data),
                padding=Padding(bytes=b"bar"),
            ),
            DataUnit(
                parse_info=ParseInfo(parse_code=ParseCodes.end_of_sequence),
            ),
        ]
    )
    orig_seq_copy = deepcopy(orig_seq)

    new_seq = replace_padding_data(orig_seq, b"baz")

    # A copy must be returned; the input must not be mutated
    assert new_seq is not orig_seq
    assert orig_seq == orig_seq_copy

    # Both padding data units get the new bytes; other units are untouched
    assert new_seq["data_units"][0]["padding"]["bytes"] == b"baz"
    assert new_seq["data_units"][1]["padding"]["bytes"] == b"baz"
    assert new_seq["data_units"][2] == orig_seq["data_units"][2]
def test_pictures(self, parse_code):
    """AUTO/missing picture numbers in picture data units are filled sequentially."""
    stream = Stream(
        sequences=[
            Sequence(
                data_units=[
                    # First in sequence: auto-numbered to the expected start offset
                    DataUnit(
                        parse_info=ParseInfo(parse_code=parse_code),
                        picture_parse=PictureParse(
                            picture_header=PictureHeader(picture_number=AUTO)
                        ),
                    ),
                    # Picture number omitted entirely: should be autofilled
                    DataUnit(
                        parse_info=ParseInfo(parse_code=parse_code),
                        picture_parse=PictureParse(picture_header=PictureHeader()),
                    ),
                    # Explicit picture number: used verbatim
                    DataUnit(
                        parse_info=ParseInfo(parse_code=parse_code),
                        picture_parse=PictureParse(
                            picture_header=PictureHeader(picture_number=0xFFFFFFFE)
                        ),
                    ),
                    # Should continue on from the last explicit number
                    DataUnit(
                        parse_info=ParseInfo(parse_code=parse_code),
                        picture_parse=PictureParse(
                            picture_header=PictureHeader(picture_number=AUTO)
                        ),
                    ),
                    # Should wrap around past 32 bits
                    DataUnit(
                        parse_info=ParseInfo(parse_code=parse_code),
                        picture_parse=PictureParse(
                            picture_header=PictureHeader(picture_number=AUTO)
                        ),
                    ),
                ]
            )
        ]
    )

    autofill_picture_number(stream, 1234)

    picture_numbers = [
        data_unit["picture_parse"]["picture_header"]["picture_number"]
        for seq in stream["sequences"]
        for data_unit in seq["data_units"]
    ]
    assert picture_numbers == [1234, 1235, 0xFFFFFFFE, 0xFFFFFFFF, 0x0]
def test_replace_sequence_headers():
    """Sequence headers are replaced (by copy) without mutating the original."""
    orig_seq = Sequence(
        data_units=[
            DataUnit(
                parse_info=ParseInfo(parse_code=ParseCodes.sequence_header),
                sequence_header=SequenceHeader(
                    base_video_format=0,
                    video_parameters=SourceParameters(),
                ),
            ),
            DataUnit(
                parse_info=ParseInfo(parse_code=ParseCodes.padding_data),
                padding=Padding(),
            ),
            DataUnit(
                parse_info=ParseInfo(parse_code=ParseCodes.sequence_header),
                sequence_header=SequenceHeader(
                    base_video_format=0,
                    video_parameters=SourceParameters(),
                ),
            ),
        ]
    )
    orig_seq_copy = deepcopy(orig_seq)

    new_sequence_header = SequenceHeader(
        base_video_format=100,
        video_parameters=SourceParameters(
            frame_size=FrameSize(custom_dimensions_flag=True),
        ),
    )
    new_seq = replace_sequence_headers(orig_seq, new_sequence_header)

    # The input sequence must not be modified
    assert orig_seq == orig_seq_copy
    assert len(new_seq["data_units"]) == 3

    sh0 = new_seq["data_units"][0]["sequence_header"]
    sh2 = new_seq["data_units"][2]["sequence_header"]

    # Non-sequence-header data units must be left alone
    assert new_seq["data_units"][1] == orig_seq["data_units"][1]

    # Both sequence headers replaced with equal values...
    assert sh0 == new_sequence_header
    assert sh2 == new_sequence_header

    # ...but as independent copies, not the same object
    assert sh0 is not new_sequence_header
    assert sh2 is not new_sequence_header
def test_padding_and_aux_data(self, parse_code):
    """Next-parse-offsets are computed for padding/aux units unless given explicitly."""
    stream = Stream(
        sequences=[
            Sequence(
                data_units=[
                    # Next parse offset not given (treated as AUTO)
                    DataUnit(parse_info=ParseInfo(parse_code=parse_code)),
                    # Next parse offset explicitly AUTO
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=parse_code,
                            next_parse_offset=AUTO,
                        ),
                    ),
                    # Next parse offset given explicitly (must not be modified)
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=parse_code,
                            next_parse_offset=100,
                        ),
                    ),
                ]
            )
        ]
    )

    # Attach a 4-byte payload of the appropriate kind to every data unit
    for seq in stream["sequences"]:
        for data_unit in seq["data_units"]:
            if parse_code == tables.ParseCodes.padding_data:
                data_unit["padding"] = Padding(bytes=b"1234")
            elif parse_code == tables.ParseCodes.auxiliary_data:
                data_unit["auxiliary_data"] = AuxiliaryData(bytes=b"1234")

    assert autofill_parse_offsets(stream) == ([], [(0, 0), (0, 1), (0, 2)])

    next_parse_offsets = [
        data_unit["parse_info"]["next_parse_offset"]
        for seq in stream["sequences"]
        for data_unit in seq["data_units"]
    ]
    # 13-byte parse_info header plus the 4 payload bytes; explicit 100 kept
    assert next_parse_offsets == [13 + 4, 13 + 4, 100]

    previous_parse_offsets = [
        data_unit["parse_info"]["previous_parse_offset"]
        for seq in stream["sequences"]
        for data_unit in seq["data_units"]
    ]
    assert previous_parse_offsets == [0, 0, 0]
def test_fragment_tp_created(self, parse_code):
    """Missing fragment transform parameters (and parents) are created on demand."""
    data_unit = DataUnit(parse_info=ParseInfo(parse_code=parse_code))

    tp = get_transform_parameters(data_unit)
    assert tp == TransformParameters()

    # Mutations of the returned dict must be visible through the data unit,
    # proving the created structures were attached (not a detached copy)
    tp["dwt_depth"] = 4
    assert data_unit == DataUnit(
        parse_info=ParseInfo(parse_code=parse_code),
        fragment_parse=FragmentParse(
            transform_parameters=TransformParameters(dwt_depth=4),
        ),
    )
def test_picture_tp_created(self, parse_code):
    """Missing picture transform parameters (and parents) are created on demand."""
    data_unit = DataUnit(parse_info=ParseInfo(parse_code=parse_code))

    tp = get_transform_parameters(data_unit)
    assert tp == TransformParameters()

    # Mutations of the returned dict must be visible through the data unit,
    # proving the created structures were attached (not a detached copy)
    tp["dwt_depth"] = 4
    assert data_unit == DataUnit(
        parse_info=ParseInfo(parse_code=parse_code),
        picture_parse=PictureParse(
            wavelet_transform=WaveletTransform(
                transform_parameters=TransformParameters(dwt_depth=4),
            ),
        ),
    )
def test_picture_tp_already_exists(self, parse_code, existing_tp):
    """Existing picture transform parameters are returned by identity, not copied."""
    data_unit = DataUnit(
        parse_info=ParseInfo(parse_code=parse_code),
        picture_parse=PictureParse(
            wavelet_transform=WaveletTransform(
                transform_parameters=existing_tp,
            ),
        ),
    )
    assert get_transform_parameters(data_unit) is existing_tp
def test_fragment_tp_already_exists(self, parse_code, existing_tp):
    """Existing fragment transform parameters are returned by identity, not copied."""
    data_unit = DataUnit(
        parse_info=ParseInfo(parse_code=parse_code),
        fragment_parse=FragmentParse(
            # fragment_slice_count=0 marks the first fragment of a picture
            fragment_header=FragmentHeader(fragment_slice_count=0),
            transform_parameters=existing_tp,
        ),
    )
    assert get_transform_parameters(data_unit) is existing_tp
def test_manual_version_numbers_unaltered(self):
    """An explicit major_version is never second-guessed by the auto filler."""
    # The stream below includes a (conflicting) major version of 2 alongside
    # defined extended transform parameters. Because the major version is
    # explicit, the auto filler should just ignore the conflicting
    # version/ETP combination and change nothing.
    stream = Stream(
        sequences=[
            Sequence(
                data_units=[
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.sequence_header
                        ),
                        sequence_header=SequenceHeader(
                            parse_parameters=ParseParameters(major_version=2),
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            transform_parameters=TransformParameters(
                                extended_transform_parameters=ExtendedTransformParameters(
                                    asym_transform_flag=True,
                                    dwt_depth_ho=2,
                                ),
                            ),
                        ),
                    ),
                ]
            )
        ]
    )
    stream_orig = deepcopy(stream)

    autofill_major_version(stream)

    assert stream == stream_orig
def test_values_to_be_set_later_are_set_to_zero(self, parse_code, explicit_auto):
    """Offsets to be finalized later are zeroed; explicit offsets are kept."""
    stream = Stream(
        sequences=[
            Sequence(
                data_units=[
                    # Offsets left for automatic filling
                    DataUnit(parse_info=ParseInfo(parse_code=parse_code)),
                    # Explicit offsets (must not be overridden)
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=parse_code,
                            next_parse_offset=100,
                            previous_parse_offset=200,
                        )
                    ),
                ]
            )
        ]
    )
    if explicit_auto:
        # Spelling out AUTO must behave the same as omitting the fields
        parse_info = stream["sequences"][0]["data_units"][0]["parse_info"]
        parse_info["next_parse_offset"] = AUTO
        parse_info["previous_parse_offset"] = AUTO

    assert autofill_parse_offsets(stream) == ([(0, 0)], [(0, 0)])

    parse_info_0 = stream["sequences"][0]["data_units"][0]["parse_info"]
    assert parse_info_0["next_parse_offset"] == 0
    assert parse_info_0["previous_parse_offset"] == 0

    parse_info_1 = stream["sequences"][0]["data_units"][1]["parse_info"]
    assert parse_info_1["next_parse_offset"] == 100
    assert parse_info_1["previous_parse_offset"] == 200
def test_insertion_of_parse_parameters_when_absent(self):
    """A missing parse_parameters field is inserted with the inferred version."""
    stream = Stream(
        sequences=[
            Sequence(
                data_units=[
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.sequence_header
                        )
                    )
                ]
            )
        ]
    )

    autofill_major_version(stream)

    # A parse parameters field (and the inferred version number) should have
    # been inserted, since the major_version field defaults to AUTO.
    assert stream == Stream(
        sequences=[
            Sequence(
                data_units=[
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.sequence_header
                        ),
                        sequence_header=SequenceHeader(
                            parse_parameters=ParseParameters(major_version=2),
                        ),
                    )
                ]
            )
        ]
    )
def test_padding_and_aux_data_default_data(self, parse_code):
    """With no payload given, the default (empty) payload is assumed."""
    stream = Stream(
        sequences=[
            Sequence(
                data_units=[DataUnit(parse_info=ParseInfo(parse_code=parse_code))]
            )
        ]
    )

    assert autofill_parse_offsets(stream) == ([], [(0, 0)])

    parse_info = stream["sequences"][0]["data_units"][0]["parse_info"]
    # 13-byte parse_info header, zero payload bytes
    assert parse_info["next_parse_offset"] == 13
    assert parse_info["previous_parse_offset"] == 0
def test_minimal_sequence_unmodified(self):
    """A bare end-of-sequence stream is left untouched by the auto filler."""
    stream = Stream(
        sequences=[
            Sequence(
                data_units=[
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.end_of_sequence
                        )
                    )
                ]
            )
        ]
    )
    stream_orig = deepcopy(stream)

    autofill_major_version(stream)

    assert stream == stream_orig
def make_picture_parse_data_unit(codec_features, picture, minimum_qindex=0, minimum_slice_size_scaler=1):
    """
    Create a :py:class:`~vc2_conformance.bitstream.DataUnit` object containing
    a (possibly lossily compressed) picture.

    Parameters
    ==========
    codec_features : :py:class:`~vc2_conformance.codec_features.CodecFeatures`
    picture : {"Y": [[s, ...], ...], "C1": ..., "C2": ..., "pic_num": int}
        The picture to be encoded. This picture will be compressed using a
        simple VC-2 encoder implementation. It does not necessarily produce
        the most high-quality encodings. If ``pic_num`` is omitted,
        ``picture_number`` fields will be omitted in the output.
    minimum_qindex : int
        Specifies the minimum quantization index to be used. Must be 0 for
        lossless codecs.
    minimum_slice_size_scaler : int
        Specifies the minimum slice_size_scaler to be used for high quality
        pictures. Ignored in low delay mode.

    Returns
    =======
    data_unit : :py:class:`vc2_conformance.bitstream.DataUnit`

    Raises
    ======
    ValueError
        If ``codec_features["profile"]`` is not a recognized profile.
    """
    assert codec_features["fragment_slice_count"] == 0

    # Select the parse code matching the profile. Previously an unknown
    # profile silently produced parse_code=None, deferring the failure to an
    # obscure point downstream; fail fast instead so a newly-added profile is
    # noticed immediately.
    if codec_features["profile"] == Profiles.high_quality:
        parse_code = ParseCodes.high_quality_picture
    elif codec_features["profile"] == Profiles.low_delay:
        parse_code = ParseCodes.low_delay_picture
    else:
        raise ValueError(
            "Unsupported profile: {}".format(codec_features["profile"])
        )

    return DataUnit(
        parse_info=ParseInfo(parse_code=parse_code),
        picture_parse=make_picture_parse(
            codec_features, picture, minimum_qindex, minimum_slice_size_scaler
        ),
    )
def make_sequence_header_data_unit(codec_features):
    """
    Create a :py:class:`~vc2_conformance.bitstream.DataUnit` holding a
    sequence header which sensibly encodes the features described by the
    supplied :py:class:`~vc2_conformance.codec_features.CodecFeatures`
    dictionary.

    Parameters
    ==========
    codec_features : :py:class:`~vc2_conformance.codec_features.CodecFeatures`

    Returns
    =======
    data_unit : :py:class:`~vc2_conformance.bitstream.DataUnit`

    Raises
    ======
    :py:exc:`IncompatibleLevelAndVideoFormatError`
    """
    sequence_header = make_sequence_header(codec_features)
    return DataUnit(
        parse_info=ParseInfo(parse_code=ParseCodes.sequence_header),
        sequence_header=sequence_header,
    )
def test_multiple_sequences(self, parse_code):
    """Picture numbering restarts at the beginning of each sequence."""
    stream = Stream(
        sequences=[
            Sequence(
                data_units=[
                    DataUnit(parse_info=ParseInfo(parse_code=parse_code))
                    for _ in range(3)
                ]
            ),
            Sequence(
                data_units=[
                    DataUnit(parse_info=ParseInfo(parse_code=parse_code))
                    for _ in range(3)
                ]
            ),
        ]
    )

    autofill_picture_number(stream, 1234)

    picture_numbers = [
        data_unit["picture_parse"]["picture_header"]["picture_number"]
        for seq in stream["sequences"]
        for data_unit in seq["data_units"]
    ]
    # Numbering restarts at 1234 for the second sequence
    assert picture_numbers == [1234, 1235, 1236, 1234, 1235, 1236]
def test_version_selection(self, parameters, exp_version):
    """The auto filler picks the expected major version for each feature combo."""
    profile = parameters.get("profile", tables.Profiles.low_delay)
    frame_rate_index = parameters.get("frame_rate_index")
    signal_range_index = parameters.get("signal_range_index")
    color_spec_index = parameters.get("color_spec_index", 0)
    color_primaries_index = parameters.get("color_primaries_index", 0)
    color_matrix_index = parameters.get("color_matrix_index", 0)
    transfer_function_index = parameters.get("transfer_function_index", 0)
    wavelet_index = parameters.get(
        "wavelet_index", tables.WaveletFilters.haar_no_shift
    )
    wavelet_index_ho = parameters.get("wavelet_index_ho")
    dwt_depth_ho = parameters.get("dwt_depth_ho", None)
    parse_code = parameters.get("parse_code", tables.ParseCodes.low_delay_picture)

    # Kept as separate variables so the chosen version/ETP can be inspected
    # after autofilling
    pp = ParseParameters(major_version=AUTO, profile=profile)
    # The same object is placed in both the picture and fragment positions
    tp = TransformParameters(
        wavelet_index=wavelet_index,
        dwt_depth=2,
        extended_transform_parameters=ExtendedTransformParameters(
            asym_transform_index_flag=wavelet_index_ho is not None,
            wavelet_index_ho=wavelet_index_ho,
            asym_transform_flag=dwt_depth_ho is not None,
            dwt_depth_ho=dwt_depth_ho,
        ),
    )

    sequence_header_data_unit = DataUnit(
        parse_info=ParseInfo(parse_code=tables.ParseCodes.sequence_header),
        sequence_header=SequenceHeader(
            parse_parameters=pp,
            video_parameters=SourceParameters(
                frame_rate=FrameRate(
                    custom_frame_rate_flag=frame_rate_index is not None,
                    index=frame_rate_index,
                ),
                signal_range=SignalRange(
                    custom_signal_range_flag=signal_range_index is not None,
                    index=signal_range_index,
                ),
                color_spec=ColorSpec(
                    custom_color_spec_flag=True,
                    index=color_spec_index,
                    color_primaries=ColorPrimaries(
                        custom_color_primaries_flag=color_primaries_index is not None,
                        index=color_primaries_index,
                    ),
                    color_matrix=ColorMatrix(
                        custom_color_matrix_flag=color_matrix_index is not None,
                        index=color_matrix_index,
                    ),
                    transfer_function=TransferFunction(
                        custom_transfer_function_flag=transfer_function_index is not None,
                        index=transfer_function_index,
                    ),
                ),
            ),
        ),
    )
    picture_data_unit = DataUnit(
        parse_info=ParseInfo(parse_code=parse_code),
        picture_parse=PictureParse(
            wavelet_transform=WaveletTransform(transform_parameters=tp)
        ),
        fragment_parse=FragmentParse(transform_parameters=tp),
    )
    stream = Stream(
        sequences=[
            Sequence(data_units=[sequence_header_data_unit, picture_data_unit])
        ]
    )

    autofill_major_version(stream)

    assert pp["major_version"] == exp_version
    # Extended transform parameters must be present exactly when version 3 is used
    if pp["major_version"] == 3:
        assert "extended_transform_parameters" in tp
    else:
        assert "extended_transform_parameters" not in tp
def test_fragments(self, parse_code):
    """AUTO/missing picture numbers in fragment headers follow picture boundaries."""

    def fragment(fragment_slice_count, **header_fields):
        # Build a fragment data unit with the given header fields;
        # fragment_slice_count=0 marks the first fragment of a new picture.
        return DataUnit(
            parse_info=ParseInfo(parse_code=parse_code),
            fragment_parse=FragmentParse(
                fragment_header=FragmentHeader(
                    fragment_slice_count=fragment_slice_count,
                    **header_fields
                )
            ),
        )

    stream = Stream(
        sequences=[
            Sequence(
                data_units=[
                    # First in sequence: auto-numbered to expected start offset
                    fragment(0, picture_number=AUTO),
                    # Not the first fragment of its picture, so the picture
                    # number must not be incremented
                    fragment(1, picture_number=AUTO),
                    # Picture number omitted entirely: still autofilled
                    fragment(1),
                    # New picture started: number auto-increments
                    fragment(0, picture_number=AUTO),
                    # Explicit picture number: used verbatim
                    fragment(0, picture_number=4321),
                    # ...even when it changes the number mid-picture
                    fragment(1, picture_number=0xFFFFFFFE),
                    # Continues on from the last explicit number
                    fragment(0, picture_number=AUTO),
                    # Wraps around past 32 bits
                    fragment(0, picture_number=AUTO),
                ]
            )
        ]
    )

    autofill_picture_number(stream, 1234)

    picture_numbers = [
        data_unit["fragment_parse"]["fragment_header"]["picture_number"]
        for seq in stream["sequences"]
        for data_unit in seq["data_units"]
    ]
    assert picture_numbers == [
        1234,
        1234,
        1234,
        1235,
        4321,
        0xFFFFFFFE,
        0xFFFFFFFF,
        0x0,
    ]
def make_auxiliary_data_unit():
    """Create a :py:class:`DataUnit` containing (empty) auxiliary data."""
    return DataUnit(
        parse_info=ParseInfo(parse_code=ParseCodes.auxiliary_data),
        auxiliary_data=AuxiliaryData(),
    )
class TestGetTransformParameters(object):
    """Tests for get_transform_parameters over pictures and fragments."""

    @pytest.mark.parametrize(
        "data_unit",
        [
            # Non-picture containing data units
            DataUnit(parse_info=ParseInfo(parse_code=parse_code))
            for parse_code in [
                tables.ParseCodes.sequence_header,
                tables.ParseCodes.end_of_sequence,
                tables.ParseCodes.auxiliary_data,
                tables.ParseCodes.padding_data,
            ]
        ]
        + [
            # Non-first fragments (fragment_slice_count != 0)
            DataUnit(
                parse_info=ParseInfo(parse_code=parse_code),
                fragment_parse=FragmentParse(
                    fragment_header=FragmentHeader(fragment_slice_count=1)
                ),
            )
            for parse_code in [
                tables.ParseCodes.low_delay_picture_fragment,
                tables.ParseCodes.high_quality_picture_fragment,
            ]
        ],
    )
    def test_non_picture_or_first_fragment(self, data_unit):
        # Data units with no transform parameters to offer yield None
        assert get_transform_parameters(data_unit) is None

    @pytest.mark.parametrize(
        "parse_code",
        [
            tables.ParseCodes.low_delay_picture,
            tables.ParseCodes.high_quality_picture,
        ],
    )
    @pytest.mark.parametrize("existing_tp", [{}, TransformParameters()])
    def test_picture_tp_already_exists(self, parse_code, existing_tp):
        # Existing picture transform parameters are returned by identity
        data_unit = DataUnit(
            parse_info=ParseInfo(parse_code=parse_code),
            picture_parse=PictureParse(
                wavelet_transform=WaveletTransform(
                    transform_parameters=existing_tp,
                ),
            ),
        )
        assert get_transform_parameters(data_unit) is existing_tp

    @pytest.mark.parametrize(
        "parse_code",
        [
            tables.ParseCodes.low_delay_picture,
            tables.ParseCodes.high_quality_picture,
        ],
    )
    def test_picture_tp_created(self, parse_code):
        # Missing picture transform parameters (and parents) are created
        data_unit = DataUnit(parse_info=ParseInfo(parse_code=parse_code))
        tp = get_transform_parameters(data_unit)
        assert tp == TransformParameters()
        # Mutations must be visible via the data unit (structures attached)
        tp["dwt_depth"] = 4
        assert data_unit == DataUnit(
            parse_info=ParseInfo(parse_code=parse_code),
            picture_parse=PictureParse(
                wavelet_transform=WaveletTransform(
                    transform_parameters=TransformParameters(dwt_depth=4),
                ),
            ),
        )

    @pytest.mark.parametrize(
        "parse_code",
        [
            tables.ParseCodes.low_delay_picture_fragment,
            tables.ParseCodes.high_quality_picture_fragment,
        ],
    )
    @pytest.mark.parametrize("existing_tp", [{}, TransformParameters()])
    def test_fragment_tp_already_exists(self, parse_code, existing_tp):
        # Existing fragment transform parameters are returned by identity
        data_unit = DataUnit(
            parse_info=ParseInfo(parse_code=parse_code),
            fragment_parse=FragmentParse(
                fragment_header=FragmentHeader(fragment_slice_count=0),
                transform_parameters=existing_tp,
            ),
        )
        assert get_transform_parameters(data_unit) is existing_tp

    @pytest.mark.parametrize(
        "parse_code",
        [
            tables.ParseCodes.low_delay_picture_fragment,
            tables.ParseCodes.high_quality_picture_fragment,
        ],
    )
    def test_fragment_tp_created(self, parse_code):
        # Missing fragment transform parameters (and parents) are created
        data_unit = DataUnit(parse_info=ParseInfo(parse_code=parse_code))
        tp = get_transform_parameters(data_unit)
        assert tp == TransformParameters()
        # Mutations must be visible via the data unit (structures attached)
        tp["dwt_depth"] = 4
        assert data_unit == DataUnit(
            parse_info=ParseInfo(parse_code=parse_code),
            fragment_parse=FragmentParse(
                transform_parameters=TransformParameters(dwt_depth=4),
            ),
        )
def test_works_on_multiple_sequences(self):
    """Offset autofill + finalize round-trips correctly over multiple sequences."""
    f = BytesIO()
    w = BitstreamWriter(f)

    # Two sequences of padding data units with fully automatic offsets
    stream = Stream(
        sequences=[
            Sequence(
                data_units=[
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.padding_data
                        )
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.padding_data
                        )
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.end_of_sequence
                        )
                    ),
                ]
            ),
            Sequence(
                data_units=[
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.padding_data
                        )
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.padding_data
                        )
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.end_of_sequence
                        )
                    ),
                ]
            ),
        ]
    )
    (
        next_parse_offsets_to_autofill,
        previous_parse_offsets_to_autofill,
    ) = autofill_parse_offsets(stream)
    # NOTE: a stray debug `print(stream)` was removed here

    with Serialiser(w, stream, vc2_default_values_with_auto) as serdes:
        vc2.parse_stream(serdes, State())
    w.flush()
    autofill_parse_offsets_finalize(
        w,
        serdes.context,
        next_parse_offsets_to_autofill,
        previous_parse_offsets_to_autofill,
    )

    # Deserialise and check the offsets written into the bitstream
    f.seek(0)
    r = BitstreamReader(f)
    with Deserialiser(r) as serdes:
        vc2.parse_stream(serdes, State())

    parse_infos = [
        [data_unit["parse_info"] for data_unit in sequence["data_units"]]
        for sequence in serdes.context["sequences"]
    ]
    for sequence_pis in parse_infos:
        # Start/end offsets of each sequence must be zero
        assert sequence_pis[0]["previous_parse_offset"] == 0
        assert sequence_pis[-1]["next_parse_offset"] == 0
        # Interior offsets must match the 13-byte parse_info size
        for pi1, pi2 in zip(sequence_pis, sequence_pis[1:]):
            assert pi1["next_parse_offset"] == 13
            assert pi2["previous_parse_offset"] == 13
def test_autofill_and_serialise_stream():
    """End-to-end: autofill numbers/offsets/version, serialise, then re-parse."""
    f = BytesIO()

    # Sequence with every data unit type and fully automatic numbers
    stream = Stream(
        sequences=[
            Sequence(
                data_units=[
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.sequence_header
                        ),
                        sequence_header=SequenceHeader(
                            video_parameters=SourceParameters(
                                # Tiny custom frame-size used to reduce test
                                # suite runtime
                                frame_size=FrameSize(
                                    custom_dimensions_flag=True,
                                    frame_width=4,
                                    frame_height=4,
                                )
                            ),
                        ),
                    ),
                    # Pictures
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.low_delay_picture
                        ),
                    ),
                    # First fragmented picture (header fragment + two slice
                    # fragments)
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=0)
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=1)
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=1)
                        ),
                    ),
                    # Second fragmented picture (NB: also uses high quality
                    # fragment parse codes, despite the original intent of a
                    # low delay example)
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=0)
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=1)
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=1)
                        ),
                    ),
                    # Other types
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.padding_data
                        ),
                        padding=Padding(bytes=b"123"),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.auxiliary_data
                        ),
                        auxiliary_data=AuxiliaryData(bytes=b"123"),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.end_of_sequence
                        ),
                    ),
                ]
            )
        ]
    )

    autofill_and_serialise_stream(f, stream)

    # Re-parse the serialised bytes and inspect what was actually written
    f.seek(0)
    r = BitstreamReader(f)
    with Deserialiser(r) as serdes:
        vc2.parse_stream(serdes, State())

    parse_infos = [
        data_unit["parse_info"]
        for sequence in serdes.context["sequences"]
        for data_unit in sequence["data_units"]
    ]

    # Start/end offsets must be zero
    assert parse_infos[0]["previous_parse_offset"] == 0
    assert parse_infos[-1]["next_parse_offset"] == 0

    # Offsets must be consistent and plausible (> 13-byte parse_info header)
    for pi1, pi2 in zip(parse_infos, parse_infos[1:]):
        assert pi1["next_parse_offset"] > 13
        assert pi2["previous_parse_offset"] > 13
        assert pi1["next_parse_offset"] == pi2["previous_parse_offset"]

    # Picture numbers: fragments of one picture share a number
    picture_numbers = [
        (
            data_unit.get("picture_parse", {}).get("picture_header", {})
            or data_unit.get("fragment_parse", {}).get("fragment_header", {})
        ).get("picture_number")
        for sequence in serdes.context["sequences"]
        for data_unit in sequence["data_units"]
    ]
    assert picture_numbers == [
        None,
        0,
        1,
        2,
        2,
        2,
        3,
        3,
        3,
        None,
        None,
        None,
    ]

    # Major version must be autofilled with 3 (fragments are present)
    major_versions = [
        data_unit["sequence_header"]["parse_parameters"]["major_version"]
        for sequence in serdes.context["sequences"]
        for data_unit in sequence["data_units"]
        if data_unit["parse_info"]["parse_code"] == tables.ParseCodes.sequence_header
    ]
    assert all(v == 3 for v in major_versions)
def test_removal_of_extended_transform_parameters(self):
    """ETP fields are stripped when AUTO version resolution picks version 2."""
    # NB: The stream below is actually compatible with version 2, so the
    # extended transform parameters field should be removed if and only if
    # the major version was set to AUTO in the preceding sequence header.
    stream = Stream(
        sequences=[
            Sequence(
                data_units=[
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.sequence_header,
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture,
                        ),
                        picture_parse=PictureParse(
                            wavelet_transform=WaveletTransform(
                                transform_parameters=TransformParameters(
                                    wavelet_index=tables.WaveletFilters.haar_no_shift,
                                    dwt_depth=2,
                                    extended_transform_parameters=ExtendedTransformParameters(
                                        asym_transform_index_flag=True,
                                        wavelet_index_ho=tables.WaveletFilters.haar_no_shift,
                                        asym_transform_flag=True,
                                        dwt_depth_ho=0,
                                    ),
                                ),
                            ),
                        ),
                    ),
                ]
            )
        ]
    )

    autofill_major_version(stream)

    # Version 2 chosen; ETP removed; parse_parameters inserted
    assert stream == Stream(
        sequences=[
            Sequence(
                data_units=[
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.sequence_header,
                        ),
                        sequence_header=SequenceHeader(
                            parse_parameters=ParseParameters(major_version=2),
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture,
                        ),
                        picture_parse=PictureParse(
                            wavelet_transform=WaveletTransform(
                                transform_parameters=TransformParameters(
                                    wavelet_index=tables.WaveletFilters.haar_no_shift,
                                    dwt_depth=2,
                                ),
                            ),
                        ),
                    ),
                ]
            )
        ]
    )
def make_end_of_sequence_data_unit():
    """Create a :py:class:`DataUnit` marking the end of a sequence."""
    return DataUnit(
        parse_info=ParseInfo(parse_code=ParseCodes.end_of_sequence),
    )
def make_padding_data_unit():
    """Create a :py:class:`DataUnit` containing (empty) padding data."""
    return DataUnit(
        parse_info=ParseInfo(parse_code=ParseCodes.padding_data),
        padding=Padding(),
    )
def make_fragment_parse_data_units(codec_features, picture, minimum_qindex=0, minimum_slice_size_scaler=1):
    r"""
    Create a series of :py:class:`DataUnits
    <vc2_conformance.bitstream.DataUnit>` encoding a (possibly lossily
    compressed) picture.

    Parameters
    ==========
    codec_features : :py:class:`~vc2_conformance.codec_features.CodecFeatures`
    picture : {"Y": [[s, ...], ...], "C1": ..., "C2": ..., "pic_num": int}
        The picture to be encoded. This picture will be compressed using a
        simple VC-2 encoder implementation. It does not necessarily produce
        the most high-quality encodings. If ``pic_num`` is omitted,
        ``picture_number`` fields will be omitted in the output.
    minimum_qindex : int
        Specifies the minimum quantization index to be used. Must be 0 for
        lossless codecs.
    minimum_slice_size_scaler : int
        Specifies the minimum slice_size_scaler to be used for high quality
        pictures. Ignored in low delay mode.

    Returns
    =======
    fragment_data_units : [:py:class:`vc2_conformance.bitstream.DataUnit`, ...]

    Raises
    ======
    ValueError
        If ``codec_features["profile"]`` is not a recognized profile.
    """
    assert codec_features["fragment_slice_count"] != 0

    # To avoid repeating ourselves, the fragmented picture is assembled from
    # the parts of a ready-made picture_parse.
    picture_parse = make_picture_parse(
        codec_features, picture, minimum_qindex, minimum_slice_size_scaler
    )
    wavelet_transform = picture_parse["wavelet_transform"]
    transform_parameters = wavelet_transform["transform_parameters"]
    transform_data = wavelet_transform["transform_data"]

    if codec_features["profile"] == Profiles.high_quality:
        parse_code = ParseCodes.high_quality_picture_fragment
        slices_name = "hq_slices"
    elif codec_features["profile"] == Profiles.low_delay:
        parse_code = ParseCodes.low_delay_picture_fragment
        slices_name = "ld_slices"
    else:
        # Previously an unknown profile left parse_code/slices_name unbound,
        # crashing below with an obscure NameError; fail fast instead.
        raise ValueError(
            "Unsupported profile: {}".format(codec_features["profile"])
        )

    fragment_data_units = []

    # Add the first fragment containing the transform parameters
    fragment_data_units.append(
        DataUnit(
            parse_info=ParseInfo(parse_code=parse_code),
            fragment_parse=FragmentParse(
                fragment_header=FragmentHeader(
                    fragment_data_length=0,
                    fragment_slice_count=0,
                ),
                transform_parameters=transform_parameters,
            ),
        )
    )

    # A count of how many slices worth of space remain in the current
    # (slice-containing) fragment. Initially set to zero as we don't have any
    # picture containing fragments.
    fragment_slices_remaining = 0

    # Add the remaining fragments containing the picture slices
    slice_iterator = iter(transform_data[slices_name])
    for sy in range(codec_features["slices_y"]):
        for sx in range(codec_features["slices_x"]):
            # If the current fragment is full, start a new one
            if fragment_slices_remaining == 0:
                fragment_slices_remaining = codec_features["fragment_slice_count"]
                fragment_data_units.append(
                    DataUnit(
                        parse_info=ParseInfo(parse_code=parse_code),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(
                                fragment_data_length=0,
                                # NB: Will be incremented in the next step(s)
                                fragment_slice_count=0,
                                fragment_x_offset=sx,
                                fragment_y_offset=sy,
                            ),
                            fragment_data=FragmentData(
                                {
                                    # NB: Will be populated in the next step(s)
                                    slices_name: [],
                                }
                            ),
                        ),
                    )
                )

            # Add the slice to the current fragment
            fragment_parse = fragment_data_units[-1]["fragment_parse"]
            fragment_parse["fragment_header"]["fragment_slice_count"] += 1
            fragment_parse["fragment_data"][slices_name].append(
                next(slice_iterator)
            )
            fragment_slices_remaining -= 1

    # Populate picture_number field in fragment headers, if one is provided
    if "pic_num" in picture:
        for data_unit in fragment_data_units:
            fragment_header = data_unit["fragment_parse"]["fragment_header"]
            fragment_header["picture_number"] = picture["pic_num"]

    return fragment_data_units
def test_finalizer_works(self):
    """
    End-to-end check of the parse-offset autofill machinery: serialise a
    stream containing every data unit type with auto-filled parse offsets,
    patch the offsets in-place with ``autofill_parse_offsets_finalize``,
    then deserialise and verify the next/previous parse offsets are
    mutually consistent and plausible.
    """
    f = BytesIO()
    w = BitstreamWriter(f)
    # Sequence with every data unit type and fully automatic numbers
    stream = Stream(
        sequences=[
            Sequence(
                data_units=[
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.sequence_header
                        ),
                        sequence_header=SequenceHeader(
                            parse_parameters=ParseParameters(major_version=3),
                            video_parameters=SourceParameters(
                                # Tiny custom frame-size used to reduce test suite
                                # runtime
                                frame_size=FrameSize(
                                    custom_dimensions_flag=True,
                                    frame_width=4,
                                    frame_height=4,
                                )
                            ),
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture
                        ),
                        picture_parse=PictureParse(
                            picture_header=PictureHeader(picture_number=0)
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.low_delay_picture
                        ),
                        picture_parse=PictureParse(
                            picture_header=PictureHeader(picture_number=0)
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(picture_number=0)
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(picture_number=0)
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.padding_data
                        ),
                        padding=Padding(bytes=b"123"),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.auxiliary_data
                        ),
                        auxiliary_data=AuxiliaryData(bytes=b"123"),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.end_of_sequence
                        ),
                    ),
                ]
            )
        ]
    )
    (
        next_parse_offsets_to_autofill,
        previous_parse_offsets_to_autofill,
    ) = autofill_parse_offsets(stream)
    with Serialiser(w, stream, vc2_default_values_with_auto) as serdes:
        vc2.parse_stream(serdes, State())
    w.flush()
    # The finaliser seeks around the already-written bitstream to patch in
    # the offsets; it must leave the write pointer where it found it.
    offset_before = w.tell()
    autofill_parse_offsets_finalize(
        w,
        serdes.context,
        next_parse_offsets_to_autofill,
        previous_parse_offsets_to_autofill,
    )
    assert w.tell() == offset_before
    # Re-read the finalised bitstream and collect every parse_info block
    f.seek(0)
    r = BitstreamReader(f)
    with Deserialiser(r) as serdes:
        vc2.parse_stream(serdes, State())
    parse_infos = [
        data_unit["parse_info"]
        for sequence in serdes.context["sequences"]
        for data_unit in sequence["data_units"]
    ]
    # Check for start/end offsets being zero
    assert parse_infos[0]["previous_parse_offset"] == 0
    assert parse_infos[-1]["next_parse_offset"] == 0
    # Check for consistency and plausibility of offsets: each pair of
    # adjacent data units must agree on the distance between them.
    for pi1, pi2 in zip(parse_infos, parse_infos[1:]):
        # NOTE(review): 13 is presumably the byte-length of a parse_info
        # header, so any real offset must exceed it -- confirm against spec
        assert pi1["next_parse_offset"] > 13
        assert pi2["previous_parse_offset"] > 13
        assert pi1["next_parse_offset"] == pi2["previous_parse_offset"]
class TestAutofillPictureNumber(object):
    """Tests for ``autofill_picture_number``."""

    @pytest.mark.parametrize(
        "seq",
        [
            # Empty dictionary
            Sequence(),
            # Empty sequence
            Sequence(data_units=[]),
            # Sequence with immediate end-of-sequence
            Sequence(
                data_units=[
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.end_of_sequence
                        ),
                    )
                ]
            ),
            # Sequence with no pictures
            Sequence(
                data_units=[
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.sequence_header
                        )
                    ),
                    DataUnit(
                        parse_info=ParseInfo(parse_code=tables.ParseCodes.padding_data)
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.auxiliary_data
                        )
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.end_of_sequence
                        )
                    ),
                ]
            ),
        ],
    )
    def test_non_picture_sequence(self, seq):
        """Sequences with no pictures must pass through untouched."""
        # Shouldn't crash or make any changes
        stream = Stream(sequences=[seq])
        stream_orig = deepcopy(stream)
        autofill_picture_number(stream)
        assert stream == stream_orig

    @pytest.mark.parametrize(
        "seq",
        (
            [
                # Pictures
                Sequence(
                    data_units=[
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            picture_parse=PictureParse(
                                picture_header=PictureHeader(
                                    picture_number=1234,
                                )
                            ),
                        )
                    ]
                )
                for parse_code in [
                    tables.ParseCodes.high_quality_picture,
                    tables.ParseCodes.low_delay_picture,
                ]
            ]
            + [
                # Fragments
                Sequence(
                    data_units=[
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            fragment_parse=FragmentParse(
                                fragment_header=FragmentHeader(
                                    picture_number=1234,
                                    fragment_slice_count=fragment_slice_count,
                                )
                            ),
                        )
                    ]
                )
                for parse_code in [
                    tables.ParseCodes.high_quality_picture_fragment,
                    tables.ParseCodes.low_delay_picture_fragment,
                ]
                for fragment_slice_count in [0, 1]
            ]
        ),
    )
    def test_dont_change_non_auto_picture_numbers(self, seq):
        """Explicit (non-AUTO) picture numbers must never be rewritten."""
        # Shouldn't crash or make any changes
        stream = Stream(sequences=[seq])
        stream_orig = deepcopy(stream)
        autofill_picture_number(stream)
        assert stream == stream_orig

    @pytest.mark.parametrize(
        "parse_code",
        [tables.ParseCodes.high_quality_picture, tables.ParseCodes.low_delay_picture],
    )
    def test_pictures(self, parse_code):
        """Picture data units: AUTO/missing numbers count up from the start
        offset, explicit numbers are kept, and counting wraps at 32 bits."""
        stream = Stream(
            sequences=[
                Sequence(
                    data_units=[
                        # First in sequence should be auto-numbered to expected start
                        # offset
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            picture_parse=PictureParse(
                                picture_header=PictureHeader(picture_number=AUTO)
                            ),
                        ),
                        # If picture number not mentioned, it should be autofilled
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            picture_parse=PictureParse(picture_header=PictureHeader()),
                        ),
                        # If explicit picture number given, should be used
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            picture_parse=PictureParse(
                                picture_header=PictureHeader(picture_number=0xFFFFFFFE)
                            ),
                        ),
                        # Should continue from last explicit number if given
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            picture_parse=PictureParse(
                                picture_header=PictureHeader(picture_number=AUTO)
                            ),
                        ),
                        # Should wrap-around
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            picture_parse=PictureParse(
                                picture_header=PictureHeader(picture_number=AUTO)
                            ),
                        ),
                    ]
                )
            ]
        )
        autofill_picture_number(stream, 1234)
        picture_numbers = [
            data_unit["picture_parse"]["picture_header"]["picture_number"]
            for seq in stream["sequences"]
            for data_unit in seq["data_units"]
        ]
        assert picture_numbers == [
            1234,
            1235,
            0xFFFFFFFE,
            0xFFFFFFFF,
            0x0,
        ]

    @pytest.mark.parametrize(
        "parse_code",
        [
            tables.ParseCodes.high_quality_picture_fragment,
            tables.ParseCodes.low_delay_picture_fragment,
        ],
    )
    def test_fragments(self, parse_code):
        """Fragment data units: the number only advances when a new picture
        starts (fragment_slice_count == 0), not on every fragment."""
        stream = Stream(
            sequences=[
                Sequence(
                    data_units=[
                        # First in sequence should be auto-numbered to expected start
                        # offset
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            fragment_parse=FragmentParse(
                                fragment_header=FragmentHeader(
                                    picture_number=AUTO,
                                    fragment_slice_count=0,
                                )
                            ),
                        ),
                        # If not the first fragment in the picture, the picture number
                        # should not be incremented
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            fragment_parse=FragmentParse(
                                fragment_header=FragmentHeader(
                                    picture_number=AUTO,
                                    fragment_slice_count=1,
                                )
                            ),
                        ),
                        # If picture number not mentioned, it should still be
                        # autofilled
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            fragment_parse=FragmentParse(
                                fragment_header=FragmentHeader(
                                    fragment_slice_count=1,
                                )
                            ),
                        ),
                        # Should auto increment on new picture started
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            fragment_parse=FragmentParse(
                                fragment_header=FragmentHeader(
                                    picture_number=AUTO,
                                    fragment_slice_count=0,
                                )
                            ),
                        ),
                        # If an explicit picture number is given, it should be used
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            fragment_parse=FragmentParse(
                                fragment_header=FragmentHeader(
                                    picture_number=4321,
                                    fragment_slice_count=0,
                                )
                            ),
                        ),
                        # ...even if that changes the picture number mid picture
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            fragment_parse=FragmentParse(
                                fragment_header=FragmentHeader(
                                    picture_number=0xFFFFFFFE,
                                    fragment_slice_count=1,
                                )
                            ),
                        ),
                        # Should continue on from last explicit number
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            fragment_parse=FragmentParse(
                                fragment_header=FragmentHeader(
                                    picture_number=AUTO,
                                    fragment_slice_count=0,
                                )
                            ),
                        ),
                        # Should wrap-around
                        DataUnit(
                            parse_info=ParseInfo(parse_code=parse_code),
                            fragment_parse=FragmentParse(
                                fragment_header=FragmentHeader(
                                    picture_number=AUTO,
                                    fragment_slice_count=0,
                                )
                            ),
                        ),
                    ]
                )
            ]
        )
        autofill_picture_number(stream, 1234)
        picture_numbers = [
            data_unit["fragment_parse"]["fragment_header"]["picture_number"]
            for seq in stream["sequences"]
            for data_unit in seq["data_units"]
        ]
        assert picture_numbers == [
            1234,
            1234,
            1234,
            1235,
            4321,
            0xFFFFFFFE,
            0xFFFFFFFF,
            0x0,
        ]

    @pytest.mark.parametrize(
        "parse_code",
        [tables.ParseCodes.high_quality_picture, tables.ParseCodes.low_delay_picture],
    )
    def test_multiple_sequences(self, parse_code):
        """Numbering restarts from the start offset in each new sequence.

        NOTE(review): the data units here carry no picture_parse field, so
        the assertions below imply autofill_picture_number creates the
        picture_parse/picture_header structure itself -- confirm intended.
        """
        stream = Stream(
            sequences=[
                Sequence(
                    data_units=[
                        DataUnit(parse_info=ParseInfo(parse_code=parse_code)),
                        DataUnit(parse_info=ParseInfo(parse_code=parse_code)),
                        DataUnit(parse_info=ParseInfo(parse_code=parse_code)),
                    ]
                ),
                Sequence(
                    data_units=[
                        DataUnit(parse_info=ParseInfo(parse_code=parse_code)),
                        DataUnit(parse_info=ParseInfo(parse_code=parse_code)),
                        DataUnit(parse_info=ParseInfo(parse_code=parse_code)),
                    ]
                ),
            ]
        )
        autofill_picture_number(stream, 1234)
        picture_numbers = [
            data_unit["picture_parse"]["picture_header"]["picture_number"]
            for seq in stream["sequences"]
            for data_unit in seq["data_units"]
        ]
        assert picture_numbers == [
            1234,
            1235,
            1236,
            # Restarts in second sequence
            1234,
            1235,
            1236,
        ]