def get_transform_parameters(data_unit):
    """
    For internal use. Given a
    :py:class:`~vc2_conformance.bitstream.vc2_fixeddicts.DataUnit`, if the
    data unit contains a picture or the first fragment of a fragmented
    picture, returns the
    :py:class:`~vc2_conformance.bitstream.vc2_fixeddicts.TransformParameters`,
    creating an empty one if it is not defined. Otherwise returns None.
    """
    parse_code = get_auto(data_unit.get("parse_info", {}), "parse_code", ParseInfo)
    code_state = State(parse_code=parse_code)

    if is_picture(code_state):
        # Whole pictures: transform parameters live inside the wavelet
        # transform of the picture parse.
        wavelet_transform = data_unit.setdefault("picture_parse", {}).setdefault(
            "wavelet_transform", {}
        )
        return wavelet_transform.setdefault("transform_parameters", {})

    if is_fragment(code_state):
        # Only the first fragment of a fragmented picture (the one with a
        # zero slice count) carries transform parameters.
        fragment_header = data_unit.get("fragment_parse", {}).get(
            "fragment_header", {}
        )
        slice_count = get_auto(
            fragment_header, "fragment_slice_count", FragmentHeader
        )
        if slice_count == 0:
            return data_unit.setdefault("fragment_parse", {}).setdefault(
                "transform_parameters", {}
            )

    return None
def test_iter_sequence_headers(codec_features):
    """Every generated sequence header must encode the requested video
    parameters and pass the level's conformance checks."""
    headers = list(iter_sequence_headers(codec_features))
    assert headers

    pi_context = ParseInfo(
        parse_code=ParseCodes.sequence_header,
        # An arbitrary non-zero value; this won't get picked up by the
        # conformance checker since it'll hit the end-of-file first
        next_parse_offset=999,
    )

    for header in headers:
        # Set the version to one compatible with the levels tried in this test
        header.setdefault("parse_parameters", {})["major_version"] = 2

        f = BytesIO()
        state = State()
        serialise(pi_context, parse_info, state, f)
        video_parameters = serialise(header, sequence_header, state, f)

        # Check the encoded video parameters are as requested
        assert video_parameters == codec_features["video_parameters"]

        # Check that the header does everything that the level requires. Here
        # we just check we reach the end of the sequence header without a
        # conformance error.
        f.seek(0)
        state = State()
        init_io(state, f)
        with pytest.raises((UnexpectedEndOfStream, InconsistentNextParseOffset)):
            parse_stream(state)
        assert f.tell() == len(f.getvalue())
def test_iter_slices_in_sequence(profile, fragment_slice_count):
    """iter_slices_in_sequence must yield every slice, with the correct
    deserialisation state, in raster-scan order."""
    codec_features = MINIMAL_CODEC_FEATURES.copy()
    codec_features["profile"] = profile
    codec_features["fragment_slice_count"] = fragment_slice_count
    codec_features["slices_x"] = 3
    codec_features["slices_y"] = 2
    codec_features["picture_bytes"] = 100

    num_pictures = 2
    sequence = make_sequence(
        codec_features,
        repeat_pictures(
            mid_gray(
                codec_features["video_parameters"],
                codec_features["picture_coding_mode"],
            ),
            num_pictures,
        ),
    )

    slices = list(iter_slices_in_sequence(codec_features, sequence))

    slices_per_picture = codec_features["slices_x"] * codec_features["slices_y"]

    # Should have found every slice
    assert len(slices) == slices_per_picture * num_pictures

    # Should have correct states
    if profile == Profiles.high_quality:
        expected_state = State(
            slice_prefix_bytes=0,
            slice_size_scaler=1,
            slices_x=codec_features["slices_x"],
            slices_y=codec_features["slices_y"],
        )
        for state, _, _, _ in slices:
            assert state == expected_state
    elif profile == Profiles.low_delay:
        slice_bytes = Fraction(
            codec_features["picture_bytes"],
            slices_per_picture,
        )
        expected_state = State(
            slice_bytes_numerator=slice_bytes.numerator,
            slice_bytes_denominator=slice_bytes.denominator,
            slices_x=codec_features["slices_x"],
            slices_y=codec_features["slices_y"],
        )
        for state, _, _, _ in slices:
            assert state == expected_state

    # Should have correct coordinates (raster-scan order, per picture)
    slice_iterator = iter(slices)
    for _ in range(num_pictures):
        for exp_sy in range(codec_features["slices_y"]):
            for exp_sx in range(codec_features["slices_x"]):
                _, sx, sy, _ = next(slice_iterator)
                assert (sx, sy) == (exp_sx, exp_sy)
def test_slices_have_same_dimensions(
    slices_x, slices_y, state_override, exp_same_dimensions
):
    """Check slices_have_same_dimensions for various slice-grid geometries,
    with optional overrides applied on top of a 1080p-shaped base state."""
    state = State(
        luma_width=1920,
        luma_height=1080,
        color_diff_width=960,
        color_diff_height=540,
        dwt_depth=0,
        dwt_depth_ho=0,
        slices_x=slices_x,
        slices_y=slices_y,
    )
    state.update(state_override)

    assert slices_have_same_dimensions(state) is exp_same_dimensions
def test_all_decoder_test_cases(codec_features, test_case):
    # Every test case for every basic video mode must produce a valid
    # bitstream containing pictures with the correct format. Any JSON
    # metadata must also be serialisable.

    # Must return a Stream
    assert isinstance(test_case.value, Stream)

    # Mustn't crash!
    json.dumps(test_case.metadata)

    # Serialise
    f = BytesIO()
    autofill_and_serialise_stream(f, test_case.value)
    f.seek(0)

    # Deserialise/validate, checking every decoded picture has the
    # requested format.
    def check_output_picture(picture, video_parameters, picture_coding_mode):
        assert video_parameters == codec_features["video_parameters"]
        assert picture_coding_mode == codec_features["picture_coding_mode"]

    state = State(_output_picture_callback=check_output_picture)
    with alternative_level_1():
        init_io(state, f)
        parse_stream(state)
def missing_sequence_header_bitstream_fname(tmpdir):
    """Fixture helper: write a bitstream whose first data unit is a
    high-quality picture with no preceding sequence header; return the
    filename it was written to."""
    fname = str(tmpdir.join("bitstream.vc2"))
    with open(fname, "wb") as f:
        writer = bitstream.BitstreamWriter(f)
        state = State(major_version=3)

        # Parse info announcing a HQ picture (and, deliberately, no
        # sequence header before it).
        parse_info_context = bitstream.ParseInfo(
            parse_code=tables.ParseCodes.high_quality_picture
        )
        with bitstream.Serialiser(
            writer, parse_info_context, bitstream.vc2_default_values
        ) as ser:
            bitstream.parse_info(ser, state)

        # A trivial picture with an empty (0x0) slice grid.
        picture_context = bitstream.PictureParse(
            wavelet_transform=bitstream.WaveletTransform(
                transform_parameters=bitstream.TransformParameters(
                    slice_parameters=bitstream.SliceParameters(
                        slices_x=0,
                        slices_y=0,
                    ),
                ),
            ),
        )
        with bitstream.Serialiser(
            writer, picture_context, bitstream.vc2_default_values
        ) as ser:
            bitstream.picture_parse(ser, state)
    return fname
def padding_sequence_bitstream_fname(tmpdir):
    """Fixture helper: write a bitstream containing a 2-byte padding data
    unit followed by an end-of-sequence; return the filename."""
    fname = str(tmpdir.join("bitstream.vc2"))
    with open(fname, "wb") as f:
        sequence_context = bitstream.Sequence(
            data_units=[
                bitstream.DataUnit(
                    parse_info=bitstream.ParseInfo(
                        parse_code=tables.ParseCodes.padding_data,
                        next_parse_offset=tables.PARSE_INFO_HEADER_BYTES + 2,
                    ),
                    padding=bitstream.Padding(bytes=b"\xAA\xFF"),
                ),
                bitstream.DataUnit(
                    parse_info=bitstream.ParseInfo(
                        parse_code=tables.ParseCodes.end_of_sequence,
                    )
                ),
            ]
        )
        writer = bitstream.BitstreamWriter(f)
        with bitstream.Serialiser(
            writer, sequence_context, bitstream.vc2_default_values
        ) as ser:
            bitstream.parse_sequence(ser, State())
    return fname
def test_magnitude(self):
    """The last non-zero value left in the slice must have the requested
    magnitude (bit length)."""
    hq_slice = HQSlice(
        qindex=0,
        slice_y_length=1,
        slice_c1_length=2,
        slice_c2_length=3,
        y_transform=[0, 0, 0, 0, 0, 0],
        c1_transform=[0, 0, 0],
        c2_transform=[0, 0, 0],
    )
    cut_off_value_at_end_of_hq_slice(
        State(slice_size_scaler=2),
        0,
        0,
        hq_slice,
        "Y",
        DanglingTransformValueType.sign_dangling,
        magnitude=3,
    )
    # Find the final non-zero luma value and check its bit length.
    last_nonzero = next(
        (v for v in reversed(hq_slice["y_transform"]) if v != 0), 0
    )
    assert last_nonzero.bit_length() == 3
    self.sanity_check(2, hq_slice)
def test_min_length(self, component, dangle_type):
    """The total of the three slice length fields must be padded up to the
    requested minimum length."""
    hq_slice = HQSlice(
        qindex=0,
        slice_y_length=1,
        slice_c1_length=2,
        slice_c2_length=3,
        y_transform=[0, 0, 0, 0, 0, 0],
        c1_transform=[0, 0, 0],
        c2_transform=[0, 0, 0],
    )
    cut_off_value_at_end_of_hq_slice(
        State(slice_size_scaler=2),
        0,
        0,
        hq_slice,
        component,
        dangle_type,
        magnitude=1,
        min_length=7,
    )
    total_length = (
        hq_slice["slice_y_length"]
        + hq_slice["slice_c1_length"]
        + hq_slice["slice_c2_length"]
    )
    assert total_length == 7
    self.sanity_check(2, hq_slice)
def make_state(self):
    """Return a State describing a 1x1 grid of 6-byte low-delay slices."""
    return State(
        slice_bytes_numerator=6,
        slice_bytes_denominator=1,
        slices_x=1,
        slices_y=1,
    )
def test_fill_already_aligned(self):
    """The filler pattern is repeated into the padding, truncating the final
    repetition to fit."""
    hq_slice = HQSlice(
        qindex=0,
        slice_y_length=1,
        slice_c1_length=2,
        slice_c2_length=3,
        y_transform=[0, 0, 0, 0, 0, 0, 0, 0],
        c1_transform=[0, 0, 0, 0],
        c2_transform=[0, 0, 0, 0],
    )
    fill_hq_slice_padding(
        State(slice_size_scaler=1),
        0,
        0,
        hq_slice,
        "Y",
        b"\x00\xFF\xAA",
        True,
    )
    expected_padding = bitarray(
        # Repeat one...
        "00000000"
        "11111111"
        "10101010"
        # Repeat two (truncated!)
        "00000000"
        "11111111"
    )
    assert hq_slice["y_block_padding"] == expected_padding
    # Chroma components receive no padding at all.
    assert hq_slice.get("c1_block_padding", bitarray()) == bitarray()
    assert hq_slice.get("c2_block_padding", bitarray()) == bitarray()
    self.sanity_check(1, hq_slice)
def test_sums_lengths(self, slice_size_scaler, component):
    """The whole length budget (1+2+3) should be moved onto the chosen
    component, zeroing the others."""
    hq_slice = HQSlice(
        qindex=0,
        slice_y_length=1,
        slice_c1_length=2,
        slice_c2_length=3,
        y_transform=[0, 0, 0, 0, 0, 0],
        c1_transform=[0, 0, 0],
        c2_transform=[0, 0, 0],
    )
    fill_hq_slice_padding(
        State(slice_size_scaler=slice_size_scaler),
        0,
        0,
        hq_slice,
        component,
        b"\x00",
    )
    for name, field in [
        ("Y", "slice_y_length"),
        ("C1", "slice_c1_length"),
        ("C2", "slice_c2_length"),
    ]:
        if name == component:
            assert hq_slice[field] == 1 + 2 + 3
        else:
            assert hq_slice[field] == 0
    self.sanity_check(slice_size_scaler, hq_slice)
def sanity_check(self, slice_size_scaler, slice):
    """
    Checks that the provided slice serializes correctly.
    """
    # Fill in fields the serialiser requires but the tests don't set.
    for field, default in [
        ("prefix_bytes", bytes()),
        ("y_block_padding", bitarray()),
        ("c1_block_padding", bitarray()),
        ("c2_block_padding", bitarray()),
    ]:
        slice.setdefault(field, default)

    f = BytesIO()
    with Serialiser(BitstreamWriter(f), slice) as ser:
        # A 1x1 slice grid, one pixel tall, sized to match the transform
        # value arrays in the slice under test.
        hq_slice(
            ser,
            State(
                slice_prefix_bytes=0,
                slice_size_scaler=slice_size_scaler,
                dwt_depth=0,
                dwt_depth_ho=0,
                luma_width=len(slice["y_transform"]),
                luma_height=1,
                color_diff_width=len(slice["c1_transform"]),
                color_diff_height=1,
                slices_x=1,
                slices_y=1,
            ),
            0,
            0,
        )
def test_min_length(self):
    """The last non-zero luma value must have the requested magnitude."""
    ld_slice = LDSlice(
        qindex=0,
        slice_y_length=16,
        y_transform=[0, 0, 0, 0, 0, 0],
        c_transform=[0, 0, 0, 0, 0, 0],
    )
    cut_off_value_at_end_of_ld_slice(
        State(
            slice_bytes_numerator=4,
            slice_bytes_denominator=1,
            slices_x=1,
            slices_y=1,
        ),
        0,
        0,
        ld_slice,
        "Y",
        DanglingTransformValueType.sign_dangling,
        magnitude=3,
    )
    last_nonzero = next(
        (v for v in reversed(ld_slice["y_transform"]) if v != 0), 0
    )
    assert last_nonzero.bit_length() == 3
    self.sanity_check(ld_slice)
def test_next_parse_offset_is_zero(profile, fragment_slice_count):
    # Verify that the next parse offset really is zero after serialisation

    # Generate
    codec_features = dict(
        MINIMAL_CODEC_FEATURES,
        profile=profile,
        fragment_slice_count=fragment_slice_count,
    )
    stream = absent_next_parse_offset(codec_features)

    # Serialise
    f = BytesIO()
    autofill_and_serialise_stream(f, stream)

    # Deserialise
    f.seek(0)
    with Deserialiser(BitstreamReader(f)) as des:
        parse_stream(des, State())
    decoded_stream = des.context

    # Only the sequence header at the start should have a next_parse_offset
    # defined.
    offset_is_zero = [
        data_unit["parse_info"]["next_parse_offset"] == 0
        for seq in decoded_stream["sequences"]
        for data_unit in seq["data_units"]
    ]
    assert not offset_is_zero[0]
    assert all(offset_is_zero[1:])
def deserialise_and_measure_slice_data_unit_sizes(bitstream):
    """
    Deserialise a bitstream, returning the number of bytes in each data unit
    which contains picture slices.
    """
    out = []
    # Single-element lists used as mutable cells shared with the monitor
    # closure below (Python 2-compatible alternative to ``nonlocal``).
    slice_start_offset = [None]
    contained_slices = [False]

    def monitor(des, target, _value):
        # Called by MonitoredDeserialiser after every deserialised value.
        # Dirty: Relies on implementation detail of vc2_fixeddicts...
        if des.path(target)[4:] in (
            # The last field deserialised immediately before slice data in
            # picture parses and (picture-carrying) fragment parses,
            # respectively.
            ["picture_parse", "wavelet_transform", "padding"],
            ["fragment_parse", "fragment_header", "fragment_slice_count"],
            ["fragment_parse", "fragment_header", "fragment_y_offset"],
        ):
            slice_start_offset[0] = to_bit_offset(*des.io.tell())
        elif target == "qindex":
            # Every slice starts with a qindex: seeing one means the current
            # data unit really contained slices.
            contained_slices[0] = True
        elif target == "parse_code" and slice_start_offset[0] is not None:
            # Start of the next data unit: emit the measured size of the
            # previous one, if it contained slices.
            if contained_slices[0]:
                out.append(to_bit_offset(*des.io.tell()) - slice_start_offset[0])
            contained_slices[0] = False

    reader = BitstreamReader(BytesIO(bitstream))
    with MonitoredDeserialiser(monitor, reader) as des:
        parse_stream(des, State())
    return out
def test_read_past_eof_crashes(func, args):
    """Any read primitive moving past the end of the stream must raise
    UnexpectedEndOfStream."""
    empty_stream = BytesIO()
    state = State(bits_left=1)
    decoder.init_io(state, empty_stream)
    with pytest.raises(decoder.UnexpectedEndOfStream):
        func(state, *args)
def test_qindex_matters(self):
    codec_features = deepcopy(MINIMAL_CODEC_FEATURES)
    codec_features["lossless"] = True
    codec_features["picture_bytes"] = None

    # Sanity check: Make sure we're outputting some kind of picture which
    # really does depend on quantization
    decoded_pictures = {False: [], True: []}
    for override_qindex in [False, True]:
        stream = lossless_quantization(codec_features)

        if override_qindex:
            # Clamp every slice's qindex to zero (i.e. disable quantization)
            for _state, _sx, _sy, hq_slice in iter_slices_in_sequence(
                codec_features,
                stream["sequences"][0],
            ):
                hq_slice["qindex"] = 0

        # Serialise
        f = BytesIO()
        autofill_and_serialise_stream(f, stream)
        f.seek(0)

        # Decode
        def output_picture_callback(picture, video_parameters, picture_coding_mode):
            decoded_pictures[override_qindex].append(picture)

        state = State(_output_picture_callback=output_picture_callback)
        init_io(state, f)
        parse_stream(state)

    # Make sure that the qindex mattered by checking that decoding with
    # qindex clamped to 0 resulted in different pictures
    assert decoded_pictures[False] != decoded_pictures[True]
def test_make_parse_parameters():
    """Serialised parse parameters must reflect the codec features plus the
    defaults supplied by vc2_default_values."""
    state = State()
    serialise(make_parse_parameters(MINIMAL_CODEC_FEATURES), parse_parameters, state)

    assert state["major_version"] == 3
    # From vc2_default_values
    assert state["minor_version"] == 0
    assert state["profile"] == Profiles.high_quality
    assert state["level"] == Levels.unconstrained
def test_valid_bitstream(self):
    # This test runs all of the option sets produced through the bitstream
    # generator to verify that 1: the dictionaries all contain the expected
    # fields and 2: that they encode the options they're supposed to. This
    # test also indirectly tests all of the contributing option generating
    # functions.
    base_video_format = BaseVideoFormats.hd1080p_50
    video_parameters = set_source_defaults(base_video_format)

    # No level restrictions: any value is acceptable for every key.
    level_constraints_dict = defaultdict(AnyValue)

    option_sets = list(
        iter_source_parameter_options(
            video_parameters, video_parameters, level_constraints_dict
        )
    )
    for context in option_sets:
        f = BytesIO()
        with Serialiser(BitstreamWriter(f), context) as ser:
            resulting_video_parameters = source_parameters(
                ser,
                State(),
                base_video_format,
            )
        assert resulting_video_parameters == video_parameters
def test_whole_picture(parse_code):
    # A sanity check which runs picture decoding for whole pictures and makes
    # sure nothing crashes

    # Serialise a sample stream
    sequence_header_context = bitstream.SequenceHeader(
        video_parameters=bitstream.SourceParameters(
            frame_size=bitstream.FrameSize(
                # Don't waste time on full-sized frames
                custom_dimensions_flag=True,
                frame_width=4,
                frame_height=4,
            ),
            clean_area=bitstream.CleanArea(
                custom_clean_area_flag=True,
                clean_width=4,
                clean_height=4,
            ),
        ),
    )
    serialisation_state = State()
    sh_bytes = serialise_to_bytes(sequence_header_context, serialisation_state)
    serialisation_state["parse_code"] = parse_code
    pic_bytes = serialise_to_bytes(bitstream.PictureParse(), serialisation_state)

    # Check it is parsed without failiures
    state = bytes_to_state(sh_bytes + pic_bytes)
    state["_num_pictures_in_sequence"] = 0
    decoder.sequence_header(state)
    state["parse_code"] = parse_code
    decoder.picture_parse(state)
def output_decoder_test_case(output_dir, codec_features, test_case):
    """
    Write a decoder test case to disk.

    Parameters
    ==========
    output_dir : str
        Output directory to write test cases to.
    codec_features : :py:class:`~vc2_conformance.codec_features.CodecFeatures`
    test_case : :py:class:`~vc2_conformance.test_cases.TestCase`
    """
    # Serialise bitstream
    bitstream_filename = os.path.join(
        output_dir,
        "{}.vc2".format(test_case.name),
    )
    with open(bitstream_filename, "wb") as f:
        autofill_and_serialise_stream(f, test_case.value)

    # Decode model answer: re-read the bitstream just written and dump each
    # decoded picture as a numbered raw file.
    model_answer_directory = os.path.join(
        output_dir,
        "{}_expected".format(test_case.name),
    )
    makedirs(model_answer_directory, exist_ok=True)
    with open(bitstream_filename, "rb") as f:
        picture_index = [0]

        def output_picture(picture, video_parameters, picture_coding_mode):
            file_format.write(
                picture,
                video_parameters,
                picture_coding_mode,
                os.path.join(
                    model_answer_directory,
                    "picture_{}.raw".format(picture_index[0]),
                ),
            )
            picture_index[0] += 1

        state = State(_output_picture_callback=output_picture)
        init_io(state, f)
        parse_stream(state)

    # Write metadata
    if test_case.metadata is not None:
        metadata_filename = os.path.join(
            output_dir,
            "{}_metadata.json".format(test_case.name),
        )
        with open(metadata_filename, "w") as f:
            json.dump(test_case.metadata, f)

    logging.info(
        "Generated decoder test case %s for %s",
        test_case.name,
        codec_features["name"],
    )
def test_component_selection(self, component, dangle_type):
    ld_slice = LDSlice(
        qindex=0,
        slice_y_length=16,
        y_transform=[1, 2, 3, 4, 5, 6],
        c_transform=[7, 8, 9, 0, 1, 2],
    )
    cut_off_value_at_end_of_ld_slice(
        State(
            slice_bytes_numerator=4,
            slice_bytes_denominator=1,
            slices_x=1,
            slices_y=1,
        ),
        0,
        0,
        ld_slice,
        component,
        dangle_type,
        magnitude=1,
    )

    # Check other components are zeroed out
    for other_component in ["Y", "C"]:
        if other_component != component:
            values = ld_slice["{}_transform".format(other_component.lower())]
            assert len(values) == 6
            assert all(value == 0 for value in values)

    # Check target component is not zeroed out (NB: this test will only
    # work for the dangling zeros case if the total number of values is <
    # 16)
    values = ld_slice["{}_transform".format(component.lower())]
    assert len(values) == 6
    assert values != [0, 0, 0, 0, 0, 0]

    # Check that the components do write beyond the end of the slice
    if component == "Y":
        component_bits = ld_slice["slice_y_length"]
    else:
        component_bits = (
            # Slice size
            (4 * 8)
            # Qindex
            - 7
            # 5 bit size
            - 5
        ) - ld_slice["slice_y_length"]

    actual_bits = sum(map(signed_exp_golomb_length, values))
    if dangle_type != DanglingTransformValueType.zero_dangling:
        # Trailing zeros dangle off the end of the slice; discount one bit
        # for each.
        for value in reversed(values):
            if value == 0:
                actual_bits -= 1
            else:
                break
    assert actual_bits > component_bits

    self.sanity_check(ld_slice)
def test_component_selection(self, component, dangle_type):
    slice_size_scaler = 2
    hq_slice = HQSlice(
        qindex=0,
        slice_y_length=1,
        slice_c1_length=2,
        slice_c2_length=3,
        y_transform=[1, 2, 3, 4, 5, 6],
        c1_transform=[7, 8, 9],
        c2_transform=[0, 1, 2],
    )
    cut_off_value_at_end_of_hq_slice(
        State(slice_size_scaler=slice_size_scaler),
        0,
        0,
        hq_slice,
        component,
        dangle_type,
        magnitude=1,
    )

    # Check other components are zeroed out
    for other_component in ["Y", "C1", "C2"]:
        if other_component != component:
            values = hq_slice["{}_transform".format(other_component.lower())]
            assert len(values) == (6 if other_component == "Y" else 3)
            assert all(value == 0 for value in values)

    # Check target component is not zeroed out (NB: this test will only
    # work for the dangling zeros case if the total number of values is <
    # 2*8*slice_size_scaler)
    values = hq_slice["{}_transform".format(component.lower())]
    if component == "Y":
        assert len(values) == 6
        assert values != [0, 0, 0, 0, 0, 0]
    else:
        assert len(values) == 3
        assert values != [0, 0, 0]

    # Check that the components do write beyond the end of the slice
    component_bits = (
        slice_size_scaler
        * hq_slice["slice_{}_length".format(component.lower())]
        * 8
    )
    actual_bits = sum(map(signed_exp_golomb_length, values))
    if dangle_type != DanglingTransformValueType.zero_dangling:
        # Trailing zeros dangle off the end of the slice; discount one bit
        # for each.
        for value in reversed(values):
            if value == 0:
                actual_bits -= 1
            else:
                break
    assert actual_bits > component_bits

    self.sanity_check(slice_size_scaler, hq_slice)
def test_record_partial_bytes(self):
    """A recording stopped mid-byte reports the final byte with its unread
    bits zeroed, and reading continues from the same bit position."""
    stream = BytesIO(b"\xAA\xFF")
    state = State()
    decoder.init_io(state, stream)

    decoder.record_bitstream_start(state)
    assert decoder.read_nbits(state, 12) == 0xAAF
    assert decoder.record_bitstream_finish(state) == b"\xAA\xF0"

    # The remaining nibble is still readable after the recording ends.
    assert decoder.read_nbits(state, 4) == 0xF
def test_record_whole_number_of_bytes(self):
    # Also records reading right up to the EOF
    stream = BytesIO(b"\xAA\xBF")
    state = State()
    decoder.init_io(state, stream)

    decoder.record_bitstream_start(state)
    assert decoder.read_nbits(state, 16) == 0xAABF
    assert decoder.record_bitstream_finish(state) == b"\xAA\xBF"
def test_empty_recording(self):
    """Starting a recording installs the accumulator in the state and
    finishing removes it again, returning the (empty) recording."""
    stream = BytesIO()
    state = State()
    decoder.init_io(state, stream)

    decoder.record_bitstream_start(state)
    assert "_recorded_bytes" in state
    assert decoder.record_bitstream_finish(state) == bytearray()
    assert "_recorded_bytes" not in state
def test_make_dummy_end_of_sequence():
    # Should deserialise correctly
    f = BytesIO(make_dummy_end_of_sequence())
    with Deserialiser(BitstreamReader(f)) as des:
        parse_sequence(des, State())

    data_units = des.context["data_units"]
    assert len(data_units) == 1
    parse_code = data_units[0]["parse_info"]["parse_code"]
    assert parse_code == ParseCodes.end_of_sequence
def serialise(context, pseudocode, state=None, file=None, *args, **kwargs):
    """Serialise ``context`` with the given ``pseudocode`` function and
    return its result. A fresh State and BytesIO are used when ``state`` or
    ``file`` are not provided."""
    if state is None:
        state = State()
    if file is None:
        file = BytesIO()
    with Serialiser(
        BitstreamWriter(file),
        context,
        vc2_default_values,
    ) as ser:
        return pseudocode(ser, state, *args, **kwargs)
def test_function_in_bitstream_module(self):
    try:
        # Should crash due to missing 'major_version' in state
        bitstream.transform_parameters(Mock(), State())
    except:  # noqa: 722
        # Bare except is deliberate: whatever was raised, the traceback must
        # name the pseudocode function that raised it.
        _exc_type, _exc_value, exc_tb = sys.exc_info()
        assert (
            most_recent_pseudocode_function(exc_tb)
            == "transform_parameters (12.4.1)"
        )