Example #1
 def analyse_quantise_synthesise(self, picture):
     """
     Encode, quantise (at multiple levels) and then decode the supplied
     picture. The decoded result at each quantisation level will be
     returned.
     
     Parameters
     ==========
     picture : :py:class:`numpy.array`
         The picture to be encoded. Will be corrupted as a side effect of
         calling this method.
         
         Width must be a multiple of ``2**(dwt_depth+dwt_depth_ho)`` pixels
         and height a multiple of ``2**dwt_depth`` pixels.
         
         Dimensions must also be sufficient that all transform coefficients
         used by the ``synthesis_exp`` will be edge-effect free.
     
     Returns
     =======
     decoded_values : [value, ...]
         The decoded value for each quantisation index in
         ``quantisation_indices``.
     """
     # Encode
     fast_partial_analysis_transform(
         self._h_filter_params,
         self._v_filter_params,
         self._dwt_depth,
         self._dwt_depth_ho,
         picture,
     )
     
     # Extract only the coeffs required for decoding
     transform_coeffs_vector = picture[self._transform_coeff_index_array]
     
     # Quantise (by every quantisation level at once)
     all_quantised_transform_coeff_vectors = apply_quantisation_sweep(
         self._quantisation_factor_matrix,
         self._quantisation_offset_matrix,
         transform_coeffs_vector,
     )
     
     # Decode at each quantisation level
     return np.array([
         self._decode(coeff_vector.tolist())
         for coeff_vector in all_quantised_transform_coeff_vectors
     ])
Example #2
def test_to_interleaved_transform_coord(dwt_depth, dwt_depth_ho):
    width = 32
    height = 16
    
    # Create a signal with unique values in every pixel
    interleaved = np.arange(width*height).reshape((height, width))
    
    # As a somewhat dirty trick we use the fast_partial_analysis_transform
    # (with a null filter) to produce a set of array views of a correctly
    # interleaved input array.
    null_filter = LiftingFilterParameters(filter_bit_shift=0, stages=[])
    subband_views = fast_partial_analysis_transform(
        null_filter,
        null_filter,
        dwt_depth,
        dwt_depth_ho,
        interleaved,
    )
    
    for level in subband_views:
        for orient in subband_views[level]:
            subband_view = subband_views[level][orient]
            for sb_y in range(subband_view.shape[0]):
                for sb_x in range(subband_view.shape[1]):
                    il_x, il_y = to_interleaved_transform_coord(
                        dwt_depth,
                        dwt_depth_ho,
                        level,
                        orient,
                        sb_x,
                        sb_y,
                    )
                    
                    assert interleaved[il_y, il_x] == subband_view[sb_y, sb_x]
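
As the final assertion shows, fast_partial_analysis_transform works in place on an interleaved array and, when no target is given, returns views into that array keyed by level and orientation; to_interleaved_transform_coord maps a subband coordinate back to its position in that interleaved picture. A minimal sketch of walking the returned views, assuming the same setup as the test above:

for level, orients in subband_views.items():
    for orient, view in orients.items():
        # Each view aliases part of ``interleaved`` rather than copying it
        print(level, orient, view.shape)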
Example #3
    def test_bad_target(self):
        wavelet_index = tables.WaveletFilters.haar_with_shift
        wavelet_index_ho = tables.WaveletFilters.haar_with_shift
        dwt_depth = 0
        dwt_depth_ho = 2

        h_filter_params = tables.LIFTING_FILTERS[wavelet_index_ho]
        v_filter_params = tables.LIFTING_FILTERS[wavelet_index]

        picture = np.array([[1, 2, 3, 4]])

        with pytest.raises(ValueError):
            fast_partial_analysis_transform(
                h_filter_params,
                v_filter_params,
                dwt_depth,
                dwt_depth_ho,
                picture.copy(),
                (4, "foo", 0, 0),
            )
Example #4
 def check_target(level, array_name, model_answers):
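     # Re-run the analysis transform on a fresh copy of the picture for each
     # target coordinate and check the extracted target value against the
     # model answer.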
     for ty, row in enumerate(model_answers):
         for tx, model_value in enumerate(row):
             assert fast_partial_analysis_transform(
                 h_filter_params,
                 v_filter_params,
                 dwt_depth,
                 dwt_depth_ho,
                 picture.copy(),
                 (level, array_name, tx, ty),
             ) == model_value
Example #5
def evaluate_analysis_test_pattern_output(
    h_filter_params,
    v_filter_params,
    dwt_depth,
    dwt_depth_ho,
    level,
    array_name,
    test_pattern_specification,
    input_min,
    input_max,
):
    """
    Given an analysis test pattern (e.g. created using
    :py:func:`make_analysis_maximising_pattern`), return the actual intermediate
    encoder value when the signal is processed.
    
    Parameters
    ==========
    h_filter_params : :py:class:`vc2_data_tables.LiftingFilterParameters`
    v_filter_params : :py:class:`vc2_data_tables.LiftingFilterParameters`
        Horizontal and vertical *synthesis* (not analysis!) filter
        parameters (e.g. from :py:data:`vc2_data_tables.LIFTING_FILTERS`)
        defining the wavelet transform used.
    dwt_depth : int
    dwt_depth_ho : int
        The transform depth used by the filters.
    level : int
    array_name : str
        The intermediate value in the encoder the test pattern targets.
    test_pattern_specification : :py:class:`~vc2_bit_widths.patterns.TestPatternSpecification`
        The test pattern to evaluate.
    input_min : int
    input_max : int
        The minimum and maximum value which may be used in the test pattern.
    
    Returns
    =======
    encoded_minimum : int
    encoded_maximum : int
        The target encoder value when the test pattern is encoded with minimising
        and maximising signal levels respectively.
    """
    tx, ty = test_pattern_specification.target

    # NB: Casting to native int from numpy for JSON serialisability etc.
    return tuple(
        int(
            fast_partial_analysis_transform(
                h_filter_params,
                v_filter_params,
                dwt_depth,
                dwt_depth_ho,
                convert_test_pattern_to_padded_picture_and_slice(
                    test_pattern_specification.pattern,
                    cur_min,
                    cur_max,
                    dwt_depth,
                    dwt_depth_ho,
                )[0],
                (level, array_name, tx, ty),
            )) for cur_min, cur_max in [
                # Minimise encoder output
                (input_max, input_min),
                # Maximise encoder output
                (input_min, input_max),
            ])
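
A minimal usage sketch, assuming analysis_test_patterns is a dictionary produced by static_filter_analysis (see Example #7) which maps (level, array_name, x, y) tuples to TestPatternSpecification objects, and that pictures use a 10-bit signed range:

# Sketch only: evaluate the first analysis test pattern at both extremes.
(level, array_name, x, y), spec = next(iter(analysis_test_patterns.items()))
encoded_minimum, encoded_maximum = evaluate_analysis_test_pattern_output(
    h_filter_params,
    v_filter_params,
    dwt_depth,
    dwt_depth_ho,
    level,
    array_name,
    spec,
    -512,  # input_min (assumed 10-bit signed range)
    511,   # input_max
)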
Example #6
def test_fast_partial_analysis_transform_no_target(wavelet_index,
                                                   wavelet_index_ho, dwt_depth,
                                                   dwt_depth_ho):
    # This test verifies the analysis transform produces identical results to
    # the pseudocode in the case where no edge effects are encountered.

    width = 32
    height = 8

    rand = np.random.RandomState(1)
    signal = rand.randint(-512, 511, (height, width))

    # Process using pseudocode
    state = State(
        wavelet_index=wavelet_index,
        wavelet_index_ho=wavelet_index_ho,
        dwt_depth=dwt_depth,
        dwt_depth_ho=dwt_depth_ho,
    )
    pseudocode_out = dwt(state, signal.tolist())

    # Process using matrix transform
    h_filter_params = tables.LIFTING_FILTERS[wavelet_index_ho]
    v_filter_params = tables.LIFTING_FILTERS[wavelet_index]
    matrix_out = fast_partial_analysis_transform(
        h_filter_params,
        v_filter_params,
        dwt_depth,
        dwt_depth_ho,
        signal.copy(),
    )

    # Use a symbolic representation of the transform operation to create masks
    # identifying the coefficients which are edge-effect free.
    symbolic_out, _ = analysis_transform(
        h_filter_params,
        v_filter_params,
        dwt_depth,
        dwt_depth_ho,
        SymbolArray(2),
    )
    edge_effect_free_pixel_mask = {
        level: {
            orient: np.array([[
                all(0 <= sym[1] < width and 0 <= sym[2] < height
                    for sym in strip_affine_errors(symbolic_out[level][orient][
                        col, row]).symbols() if sym is not None)
                for col in range(matrix_out[level][orient].shape[1])
            ] for row in range(matrix_out[level][orient].shape[0])])
            for orient in matrix_out[level]
        }
        for level in matrix_out
    }

    # Sanity check: Ensure that in every transform subband there is at least
    # one edge-effect free value (otherwise the test needs to be modified to
    # use a larger input picture).
    assert all(
        np.any(mask) for level, orients in edge_effect_free_pixel_mask.items()
        for orient, mask in orients.items())

    # Compare the two outputs and ensure all edge-effect free pixels are
    # identical
    assert set(pseudocode_out) == set(matrix_out)
    for level in matrix_out:
        assert set(pseudocode_out[level]) == set(matrix_out[level])
        for orient in matrix_out[level]:
            pseudocode_array = np.array(pseudocode_out[level][orient])
            matrix_array = matrix_out[level][orient]
            mask = edge_effect_free_pixel_mask[level][orient]
            assert np.array_equal(pseudocode_array[mask], matrix_array[mask])
Example #7
def test_generate_test_pictures():
    wavelet_index = WaveletFilters.haar_with_shift
    wavelet_index_ho = WaveletFilters.le_gall_5_3
    dwt_depth = 1
    dwt_depth_ho = 0

    h_filter_params = LIFTING_FILTERS[wavelet_index_ho]
    v_filter_params = LIFTING_FILTERS[wavelet_index]

    quantisation_matrix = {
        0: {
            "LL": 0
        },
        1: {
            "LH": 1,
            "HL": 2,
            "HH": 3
        },
    }

    picture_width = 16
    picture_height = 8
    picture_bit_width = 10
    value_offset = 1 << (picture_bit_width - 1)

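    # Compute signal bounds and test patterns for this wavelet and transform depth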
    (
        analysis_signal_bounds,
        synthesis_signal_bounds,
        analysis_test_patterns,
        synthesis_test_patterns,
    ) = static_filter_analysis(
        wavelet_index,
        wavelet_index_ho,
        dwt_depth,
        dwt_depth_ho,
    )

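    # Turn the generic signal bounds into concrete bounds for this picture bit width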
    (
        concrete_analysis_signal_bounds,
        concrete_synthesis_signal_bounds,
    ) = evaluate_filter_bounds(
        wavelet_index,
        wavelet_index_ho,
        dwt_depth,
        dwt_depth_ho,
        analysis_signal_bounds,
        synthesis_signal_bounds,
        picture_bit_width,
    )

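    # Upper bound on the quantisation indices worth testing for these bounds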
    max_quantisation_index = quantisation_index_bound(
        concrete_analysis_signal_bounds,
        quantisation_matrix,
    )

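    # Expected output values for each analysis and synthesis test pattern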
    (
        analysis_test_pattern_outputs,
        synthesis_test_pattern_outputs,
    ) = evaluate_test_pattern_outputs(
        wavelet_index,
        wavelet_index_ho,
        dwt_depth,
        dwt_depth_ho,
        picture_bit_width,
        quantisation_matrix,
        max_quantisation_index,
        analysis_test_patterns,
        synthesis_test_patterns,
    )

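    # Assemble the test patterns into complete test pictures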
    (
        analysis_pictures,
        synthesis_pictures,
    ) = generate_test_pictures(
        picture_width,
        picture_height,
        picture_bit_width,
        analysis_test_patterns,
        synthesis_test_patterns,
        synthesis_test_pattern_outputs,
    )

    # Check all test patterns and maximise/minimise options were included
    assert set((tp.level, tp.array_name, tp.x, tp.y, tp.maximise)
               for p in analysis_pictures for tp in p.test_points) == set(
                   (level, array_name, x, y, maximise)
                   for (level, array_name, x, y) in analysis_test_patterns
                   for maximise in [True, False])
    assert set((tp.level, tp.array_name, tp.x, tp.y, tp.maximise)
               for p in synthesis_pictures for tp in p.test_points) == set(
                   (level, array_name, x, y, maximise)
                   for (level, array_name, x, y) in synthesis_test_patterns
                   for maximise in [True, False])

    # Test analysis pictures do what they claim
    for analysis_picture in analysis_pictures:
        for test_point in analysis_picture.test_points:
            # Perform analysis on the whole test picture, capturing the target
            # value along the way
            target_value = fast_partial_analysis_transform(
                h_filter_params,
                v_filter_params,
                dwt_depth,
                dwt_depth_ho,
                analysis_picture.picture.copy() -
                value_offset,  # NB: Argument is mutated
                (
                    test_point.level,
                    test_point.array_name,
                    test_point.tx,
                    test_point.ty,
                ),
            )

            # Compare with expected output level for that test pattern
            expected_outputs = analysis_test_pattern_outputs[(
                test_point.level,
                test_point.array_name,
                test_point.x,
                test_point.y,
            )]
            if test_point.maximise:
                expected_value = expected_outputs[1]
            else:
                expected_value = expected_outputs[0]

            assert target_value == expected_value

    # PyExps required for synthesis implementation
    _, synthesis_pyexps = synthesis_transform(
        h_filter_params,
        v_filter_params,
        dwt_depth,
        dwt_depth_ho,
        make_variable_coeff_arrays(dwt_depth, dwt_depth_ho),
    )

    # Test synthesis pictures do what they claim
    for synthesis_picture in synthesis_pictures:
        for test_point in synthesis_picture.test_points:
            # Perform analysis, quantisation and synthesis on the whole test
            # picture, capturing just the target synthesis value
            codec = FastPartialAnalyseQuantiseSynthesise(
                h_filter_params,
                v_filter_params,
                dwt_depth,
                dwt_depth_ho,
                quantisation_matrix,
                [synthesis_picture.quantisation_index],
                synthesis_pyexps[(
                    test_point.level,
                    test_point.array_name,
                )][test_point.tx, test_point.ty],
            )
            # NB: Argument is mutated
            target_value = codec.analyse_quantise_synthesise(
                synthesis_picture.picture.copy() - value_offset)[0]

            # Compare with expected output level for that test pattern
            expected_outputs = synthesis_test_pattern_outputs[(
                test_point.level,
                test_point.array_name,
                test_point.x,
                test_point.y,
            )]
            if test_point.maximise:
                expected_value = expected_outputs[1][0]
            else:
                expected_value = expected_outputs[0][0]

            assert target_value == expected_value