def test_autofill_and_serialise_stream():
    f = BytesIO()

    # Sequence with every data unit type and fully automatic numbers
    stream = Stream(
        sequences=[
            Sequence(
                data_units=[
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.sequence_header
                        ),
                        sequence_header=SequenceHeader(
                            video_parameters=SourceParameters(
                                # Tiny custom frame size used to reduce test
                                # suite runtime
                                frame_size=FrameSize(
                                    custom_dimensions_flag=True,
                                    frame_width=4,
                                    frame_height=4,
                                )
                            ),
                        ),
                    ),
                    # Pictures
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.low_delay_picture
                        ),
                    ),
                    # High quality fragment
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=0)
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=1)
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.high_quality_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=1)
                        ),
                    ),
                    # Low delay fragment
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.low_delay_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=0)
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.low_delay_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=1)
                        ),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.low_delay_picture_fragment
                        ),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(fragment_slice_count=1)
                        ),
                    ),
                    # Other types
                    DataUnit(
                        parse_info=ParseInfo(parse_code=tables.ParseCodes.padding_data),
                        padding=Padding(bytes=b"123"),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.auxiliary_data
                        ),
                        auxiliary_data=AuxiliaryData(bytes=b"123"),
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.end_of_sequence
                        ),
                    ),
                ]
            )
        ]
    )

    autofill_and_serialise_stream(f, stream)

    f.seek(0)
    r = BitstreamReader(f)
    with Deserialiser(r) as serdes:
        vc2.parse_stream(serdes, State())

    parse_infos = [
        data_unit["parse_info"]
        for sequence in serdes.context["sequences"]
        for data_unit in sequence["data_units"]
    ]

    # Check for start/end offsets being zero
    assert parse_infos[0]["previous_parse_offset"] == 0
    assert parse_infos[-1]["next_parse_offset"] == 0

    # Check for consistency and plausibility of offsets
    for pi1, pi2 in zip(parse_infos, parse_infos[1:]):
        assert pi1["next_parse_offset"] > 13
        assert pi2["previous_parse_offset"] > 13
        assert pi1["next_parse_offset"] == pi2["previous_parse_offset"]

    # Check picture numbers
    picture_numbers = [
        (
            data_unit.get("picture_parse", {}).get("picture_header", {})
            or data_unit.get("fragment_parse", {}).get("fragment_header", {})
        ).get("picture_number")
        for sequence in serdes.context["sequences"]
        for data_unit in sequence["data_units"]
    ]
    assert picture_numbers == [
        None,
        0,
        1,
        2,
        2,
        2,
        3,
        3,
        3,
        None,
        None,
        None,
    ]

    # Check major version is autofilled with 3 (due to presence of fragments)
    major_versions = [
        data_unit["sequence_header"]["parse_parameters"]["major_version"]
        for sequence in serdes.context["sequences"]
        for data_unit in sequence["data_units"]
        if data_unit["parse_info"]["parse_code"] == tables.ParseCodes.sequence_header
    ]
    assert all(v == 3 for v in major_versions)
def test_works_on_multiple_sequences(self):
    f = BytesIO()
    w = BitstreamWriter(f)

    # Two sequences, each containing only padding data units and an end of
    # sequence, with fully automatic parse offsets
    stream = Stream(
        sequences=[
            Sequence(
                data_units=[
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.padding_data
                        )
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.padding_data
                        )
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.end_of_sequence
                        )
                    ),
                ]
            ),
            Sequence(
                data_units=[
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.padding_data
                        )
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.padding_data
                        )
                    ),
                    DataUnit(
                        parse_info=ParseInfo(
                            parse_code=tables.ParseCodes.end_of_sequence
                        )
                    ),
                ]
            ),
        ]
    )

    (
        next_parse_offsets_to_autofill,
        previous_parse_offsets_to_autofill,
    ) = autofill_parse_offsets(stream)

    with Serialiser(w, stream, vc2_default_values_with_auto) as serdes:
        vc2.parse_stream(serdes, State())
    w.flush()

    autofill_parse_offsets_finalize(
        w,
        serdes.context,
        next_parse_offsets_to_autofill,
        previous_parse_offsets_to_autofill,
    )

    f.seek(0)
    r = BitstreamReader(f)
    with Deserialiser(r) as serdes:
        vc2.parse_stream(serdes, State())

    parse_infos = [
        [data_unit["parse_info"] for data_unit in sequence["data_units"]]
        for sequence in serdes.context["sequences"]
    ]

    # Check for start/end offsets being zero
    for sequence_pis in parse_infos:
        assert sequence_pis[0]["previous_parse_offset"] == 0
        assert sequence_pis[-1]["next_parse_offset"] == 0

        # Check for offset correctness (a parse_info-only data unit is
        # exactly 13 bytes long)
        for pi1, pi2 in zip(sequence_pis, sequence_pis[1:]):
            assert pi1["next_parse_offset"] == 13
            assert pi2["previous_parse_offset"] == 13
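# Note: the three-step sequence exercised above (autofill_parse_offsets,
# serialise, autofill_parse_offsets_finalize) is what
# autofill_and_serialise_stream, used in test_autofill_and_serialise_stream
# above, wraps into a single call. A minimal sketch of the equivalent
# one-shot round-trip, assuming the same imports as these tests:
#
#     f = BytesIO()
#     autofill_and_serialise_stream(f, stream)
#     f.seek(0)
#     with Deserialiser(BitstreamReader(f)) as serdes:
#         vc2.parse_stream(serdes, State())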
def make_fragment_parse_data_units(
    codec_features, picture, minimum_qindex=0, minimum_slice_size_scaler=1
):
    r"""
    Create a series of :py:class:`DataUnits
    <vc2_conformance.bitstream.DataUnit>` encoding a (possibly lossily
    compressed) picture.

    Parameters
    ==========
    codec_features : :py:class:`~vc2_conformance.codec_features.CodecFeatures`
    picture : {"Y": [[s, ...], ...], "C1": ..., "C2": ..., "pic_num": int}
        The picture to be encoded. This picture will be compressed using a
        simple VC-2 encoder implementation. It will not necessarily produce
        the highest quality encodings. If ``pic_num`` is omitted, the
        ``picture_number`` fields will be omitted from the output.
    minimum_qindex : int
        Specifies the minimum quantization index to be used. Must be 0 for
        lossless codecs.
    minimum_slice_size_scaler : int
        Specifies the minimum slice_size_scaler to be used for high quality
        pictures. Ignored in low delay mode.

    Returns
    =======
    fragment_data_units : [:py:class:`vc2_conformance.bitstream.DataUnit`, ...]
    """
    assert codec_features["fragment_slice_count"] != 0

    # To avoid repeating ourselves, the fragmented picture is assembled from
    # the parts of a ready-made picture_parse.
    picture_parse = make_picture_parse(
        codec_features, picture, minimum_qindex, minimum_slice_size_scaler
    )

    wavelet_transform = picture_parse["wavelet_transform"]
    transform_parameters = wavelet_transform["transform_parameters"]
    transform_data = wavelet_transform["transform_data"]

    if codec_features["profile"] == Profiles.high_quality:
        parse_code = ParseCodes.high_quality_picture_fragment
        slices_name = "hq_slices"
    elif codec_features["profile"] == Profiles.low_delay:
        parse_code = ParseCodes.low_delay_picture_fragment
        slices_name = "ld_slices"

    fragment_data_units = []

    # Add the first fragment containing the transform parameters
    fragment_data_units.append(
        DataUnit(
            parse_info=ParseInfo(parse_code=parse_code),
            fragment_parse=FragmentParse(
                fragment_header=FragmentHeader(
                    fragment_data_length=0,
                    fragment_slice_count=0,
                ),
                transform_parameters=transform_parameters,
            ),
        )
    )

    # A count of how many slices' worth of space remains in the current
    # (slice-containing) fragment. Initially set to zero as we don't have any
    # slice-containing fragments yet.
    fragment_slices_remaining = 0

    # Add the remaining fragments containing the picture slices
    slice_iterator = iter(transform_data[slices_name])
    for sy in range(codec_features["slices_y"]):
        for sx in range(codec_features["slices_x"]):
            # If the current fragment is full, start a new one
            if fragment_slices_remaining == 0:
                fragment_slices_remaining = codec_features["fragment_slice_count"]
                fragment_data_units.append(
                    DataUnit(
                        parse_info=ParseInfo(parse_code=parse_code),
                        fragment_parse=FragmentParse(
                            fragment_header=FragmentHeader(
                                fragment_data_length=0,
                                # NB: Will be incremented in the next step(s)
                                fragment_slice_count=0,
                                fragment_x_offset=sx,
                                fragment_y_offset=sy,
                            ),
                            fragment_data=FragmentData(
                                {
                                    # NB: Will be populated in the next step(s)
                                    slices_name: [],
                                }
                            ),
                        ),
                    )
                )

            # Add the slice to the current fragment
            fragment_parse = fragment_data_units[-1]["fragment_parse"]
            fragment_parse["fragment_header"]["fragment_slice_count"] += 1
            fragment_parse["fragment_data"][slices_name].append(next(slice_iterator))
            fragment_slices_remaining -= 1

    # Populate the picture_number field in the fragment headers, if a picture
    # number is provided
    if "pic_num" in picture:
        for data_unit in fragment_data_units:
            fragment_header = data_unit["fragment_parse"]["fragment_header"]
            fragment_header["picture_number"] = picture["pic_num"]

    return fragment_data_units
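# A minimal usage sketch for make_fragment_parse_data_units (illustrative, not
# part of the encoder API): wrap the fragment data units between a sequence
# header and an end-of-sequence marker and serialise the result. Assumes
# Stream, Sequence and autofill_and_serialise_stream are importable from
# vc2_conformance.bitstream, as in the tests above. For brevity the sequence
# header is left entirely to autofill's default values; a real caller would
# derive it from codec_features instead.
def example_serialise_fragmented_picture(f, codec_features, picture):
    from vc2_conformance.bitstream import (
        Stream,
        Sequence,
        autofill_and_serialise_stream,
    )

    data_units = [
        # Sequence header with all fields autofilled (illustration only)
        DataUnit(parse_info=ParseInfo(parse_code=ParseCodes.sequence_header)),
    ]
    data_units += make_fragment_parse_data_units(codec_features, picture)
    data_units.append(make_end_of_sequence_data_unit())

    stream = Stream(sequences=[Sequence(data_units=data_units)])

    # Fills in parse offsets, picture numbers and version numbers, then writes
    # the serialised bitstream to the file-like object f
    autofill_and_serialise_stream(f, stream)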
def make_padding_data_unit():
    """Create a padding data unit with default (empty) padding bytes."""
    return DataUnit(
        parse_info=ParseInfo(parse_code=ParseCodes.padding_data),
        padding=Padding(),
    )
def make_auxiliary_data_unit():
    """Create an auxiliary data unit with default (empty) data bytes."""
    return DataUnit(
        parse_info=ParseInfo(parse_code=ParseCodes.auxiliary_data),
        auxiliary_data=AuxiliaryData(),
    )
def make_end_of_sequence_data_unit():
    """Create an end of sequence data unit."""
    return DataUnit(
        parse_info=ParseInfo(parse_code=ParseCodes.end_of_sequence),
    )
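# A sketch (not part of the encoder API) showing how the three helpers above
# combine into a minimal, picture-free sequence of the kind exercised by
# test_works_on_multiple_sequences, assuming Sequence is imported from
# vc2_conformance.bitstream as in the earlier sketch.
def example_minimal_sequence():
    from vc2_conformance.bitstream import Sequence

    return Sequence(
        data_units=[
            make_padding_data_unit(),
            make_auxiliary_data_unit(),
            make_end_of_sequence_data_unit(),
        ]
    )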