Example #1
    def synthesis_transform_output(self, synthesis_input_arrays, wavelet_index,
                                   wavelet_index_ho, dwt_depth, dwt_depth_ho):
        h_filter_params = LIFTING_FILTERS[wavelet_index_ho]
        v_filter_params = LIFTING_FILTERS[wavelet_index]

        return synthesis_transform(
            h_filter_params,
            v_filter_params,
            dwt_depth,
            dwt_depth_ho,
            synthesis_input_arrays,
        )

    def synthesis_output_linexp_array(
        self,
        filter_params,
        dwt_depth,
        dwt_depth_ho,
        synthesis_input_linexp_arrays,
    ):
        return synthesis_transform(
            filter_params,
            filter_params,
            dwt_depth,
            dwt_depth_ho,
            synthesis_input_linexp_arrays,
        )[0]

    def synthesis_transform_output(
        self,
        filter_params,
        dwt_depth,
        dwt_depth_ho,
    ):
        synthesis_input_pyexp_arrays = make_variable_coeff_arrays(
            dwt_depth, dwt_depth_ho)
        return synthesis_transform(
            filter_params,
            filter_params,
            dwt_depth,
            dwt_depth_ho,
            synthesis_input_pyexp_arrays,
        )
Example #4
    def test_synthesis_intermediate_steps_as_expected(self, dwt_depth,
                                                      dwt_depth_ho):
        filter_params = tables.LIFTING_FILTERS[
            tables.WaveletFilters.haar_with_shift]

        transform_coeffs = make_symbol_coeff_arrays(dwt_depth, dwt_depth_ho)

        _, intermediate_values = synthesis_transform(
            filter_params,
            filter_params,
            dwt_depth,
            dwt_depth_ho,
            transform_coeffs,
        )

        # 2D stages have all expected values
        for level in range(dwt_depth_ho + 1, dwt_depth + dwt_depth_ho + 1):
            names = set(n for l, n in intermediate_values if l == level)
            assert names == set([
                "LL",
                "LH",
                "HL",
                "HH",
                "L''",
                "L'",
                "L",
                "H''",
                "H'",
                "H",
                "DC''",
                "DC'",
                "DC",
                "Output",
            ])

        # HO stages have all expected values
        for level in range(1, dwt_depth_ho + 1):
            names = set(n for l, n in intermediate_values if l == level)
            assert names == set([
                "L",
                "H",
                "DC''",
                "DC'",
                "DC",
                "Output",
            ])
Example #5
    def test_filters_invert_eachother(self, wavelet_index, wavelet_index_ho,
                                      dwt_depth, dwt_depth_ho):
        # Test that the analysis and synthesis filters invert each other as a
        # check of consistency (and, indirectly, the correctness of the
        # analysis implementation and convert_between_synthesis_and_analysis)

        h_filter_params = tables.LIFTING_FILTERS[wavelet_index_ho]
        v_filter_params = tables.LIFTING_FILTERS[wavelet_index]

        input_picture = SymbolArray(2, "p")

        transform_coeffs, _ = analysis_transform(
            h_filter_params,
            v_filter_params,
            dwt_depth,
            dwt_depth_ho,
            input_picture,
        )
        output_picture, _ = synthesis_transform(
            h_filter_params,
            v_filter_params,
            dwt_depth,
            dwt_depth_ho,
            transform_coeffs,
        )

        # In this example, no quantisation is applied between the two filters.
        # As a consequence the only error terms arise from rounding errors in
        # the analysis and synthesis filters. Since this implementation does
        # not account for divisions of the same numbers producing the same
        # rounding errors, these rounding errors do not cancel out here.
        # However, aside from these terms, the input and output of the filters
        # should be identical.
        rounding_errors = output_picture[0, 0] - input_picture[0, 0]
        assert all(
            isinstance(sym, AAError) for sym in rounding_errors.symbols())
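
The residual computed above is a LinExp, which can be iterated directly to see the individual affine-arithmetic error terms and their weights. A minimal sketch, reusing the names from the test above (iterating a LinExp yields (symbol, coefficient) pairs, as also used in Example #12):

residual = output_picture[0, 0] - input_picture[0, 0]

# Each remaining symbol should be an affine-arithmetic rounding-error term
# (AAError); its coefficient is that error term's weight in the output pixel.
for sym, coeff in residual:
    print(sym, coeff)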
Example #6
def test_evaluate_synthesis_test_pattern_output():
    # In this test we simply check that the decoded values match those
    # computed by the optimise_synthesis_maximising_test_pattern function

    wavelet_index = WaveletFilters.haar_with_shift
    wavelet_index_ho = WaveletFilters.le_gall_5_3
    dwt_depth = 1
    dwt_depth_ho = 0

    picture_bit_width = 10

    max_quantisation_index = 64

    quantisation_matrix = {
        0: {
            "LL": 0
        },
        1: {
            "LH": 1,
            "HL": 2,
            "HH": 3
        },
    }

    h_filter_params = LIFTING_FILTERS[wavelet_index_ho]
    v_filter_params = LIFTING_FILTERS[wavelet_index]

    input_min, input_max = signed_integer_range(picture_bit_width)

    input_array = SymbolArray(2)
    analysis_transform_coeff_arrays, _ = analysis_transform(
        h_filter_params,
        v_filter_params,
        dwt_depth,
        dwt_depth_ho,
        input_array,
    )

    symbolic_coeff_arrays = make_symbol_coeff_arrays(dwt_depth, dwt_depth_ho)
    symbolic_output_array, symbolic_intermediate_arrays = synthesis_transform(
        h_filter_params,
        v_filter_params,
        dwt_depth,
        dwt_depth_ho,
        symbolic_coeff_arrays,
    )

    pyexp_coeff_arrays = make_variable_coeff_arrays(dwt_depth, dwt_depth_ho)
    _, pyexp_intermediate_arrays = synthesis_transform(
        h_filter_params,
        v_filter_params,
        dwt_depth,
        dwt_depth_ho,
        pyexp_coeff_arrays,
    )

    for (level,
         array_name), target_array in symbolic_intermediate_arrays.items():
        for x in range(target_array.period[0]):
            for y in range(target_array.period[1]):
                # Create a test pattern
                test_pattern = make_synthesis_maximising_pattern(
                    input_array,
                    analysis_transform_coeff_arrays,
                    target_array,
                    symbolic_output_array,
                    x,
                    y,
                )

                synthesis_pyexp = pyexp_intermediate_arrays[(level,
                                                             array_name)][x, y]
                # Run with no optimisation iterations; as a side effect, this
                # computes the actual decoded value to compare against
                test_pattern = optimise_synthesis_maximising_test_pattern(
                    h_filter_params,
                    v_filter_params,
                    dwt_depth,
                    dwt_depth_ho,
                    quantisation_matrix,
                    synthesis_pyexp,
                    test_pattern,
                    input_min,
                    input_max,
                    max_quantisation_index,
                    None,
                    1,
                    None,
                    0.0,
                    0.0,
                    0,
                    0,
                )

                # Find the actual values
                lower_value, upper_value = evaluate_synthesis_test_pattern_output(
                    h_filter_params,
                    v_filter_params,
                    dwt_depth,
                    dwt_depth_ho,
                    quantisation_matrix,
                    synthesis_pyexp,
                    test_pattern,
                    input_min,
                    input_max,
                    max_quantisation_index,
                )

                assert upper_value[0] == test_pattern.decoded_value
                assert upper_value[1] == test_pattern.quantisation_index
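
For reference, the bare positional arguments ``None, 1, None, 0.0, 0.0, 0, 0`` above are the search parameters of optimise_synthesis_maximising_test_pattern. A sketch of the same zero-iteration call written with keyword arguments, reusing the loop-local names from the test above and assuming the parameter names match those used in Example #8 (the mapping is an assumption, not taken from the function's definition):

# Hypothetical keyword-argument form of the call above (assumed names);
# the behaviour should be identical to the positional call.
test_pattern = optimise_synthesis_maximising_test_pattern(
    h_filter_params=h_filter_params,
    v_filter_params=v_filter_params,
    dwt_depth=dwt_depth,
    dwt_depth_ho=dwt_depth_ho,
    quantisation_matrix=quantisation_matrix,
    synthesis_pyexp=synthesis_pyexp,
    test_pattern_specification=test_pattern,
    input_min=input_min,
    input_max=input_max,
    max_quantisation_index=max_quantisation_index,
    random_state=None,
    number_of_searches=1,
    terminate_early=None,
    added_corruptions_per_iteration=0,
    removed_corruptions_per_iteration=0,
    base_iterations=0,  # no optimisation iterations
    added_iterations_per_improvement=0,
)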
Example #7
def evaluate_test_pattern_outputs(
    wavelet_index,
    wavelet_index_ho,
    dwt_depth,
    dwt_depth_ho,
    picture_bit_width,
    quantisation_matrix,
    max_quantisation_index,
    analysis_test_patterns,
    synthesis_test_patterns,
):
    """
    Given a set of test patterns, compute the signal levels actually produced
    by them when passed through a real encoder/decoder.
    
    Parameters
    ==========
    wavelet_index : :py:class:`vc2_data_tables.WaveletFilters` or int
    wavelet_index_ho : :py:class:`vc2_data_tables.WaveletFilters` or int
    dwt_depth : int
    dwt_depth_ho : int
        The filter parameters.
    picture_bit_width : int
        The number of bits in the input pictures.
    quantisation_matrix : {level: {orient: value, ...}, ...}
        The quantisation matrix.
    max_quantisation_index : int
        The maximum quantisation index to try (e.g. as computed by
        :py:func:`quantisation_index_bound`). Each synthesis test pattern will
        be quantised with every quantisation index up to (and including) this
        limit and the worst-case value for any quantisation index will be
        reported.
    analysis_test_patterns : {(level, array_name, x, y): :py:class:`~vc2_bit_widths.patterns.TestPatternSpecification`, ...}
    synthesis_test_patterns : {(level, array_name, x, y): :py:class:`~vc2_bit_widths.patterns.TestPatternSpecification`, ...}
        The test patterns to assess, e.g. from
        :py:func:`static_filter_analysis` or
        :py:func:`optimise_synthesis_test_patterns`.
    
    Returns
    =======
    analysis_test_pattern_outputs : {(level, array_name, x, y): (lower_bound, upper_bound), ...}
    synthesis_test_pattern_outputs : {(level, array_name, x, y): ((lower_bound, qi), (upper_bound, qi)), ...}
        The worst-case signal levels achieved for each of the provided test
        signals when using minimising and maximising versions of the test
        pattern respectively.
        
        For the synthesis test patterns, the quantisation index used to achieve
        the worst-case values is also reported.
        
        Includes values for *all* arrays and phases, even if array
        interleavings/subsamplings/renamings are omitted in the input
        arguments.
    """
    h_filter_params = LIFTING_FILTERS[wavelet_index_ho]
    v_filter_params = LIFTING_FILTERS[wavelet_index]

    input_min, input_max = signed_integer_range(picture_bit_width)

    analysis_test_pattern_outputs = OrderedDict()
    for i, ((level, array_name, x, y),
            test_pattern) in enumerate(analysis_test_patterns.items()):
        logger.info(
            "Evaluating analysis test pattern %d of %d (Level %d, %s[%d, %d])...",
            i + 1,
            len(analysis_test_patterns),
            level,
            array_name,
            x,
            y,
        )
        analysis_test_pattern_outputs[(
            level, array_name, x, y)] = evaluate_analysis_test_pattern_output(
                h_filter_params=h_filter_params,
                v_filter_params=v_filter_params,
                dwt_depth=dwt_depth,
                dwt_depth_ho=dwt_depth_ho,
                level=level,
                array_name=array_name,
                test_pattern_specification=test_pattern,
                input_min=input_min,
                input_max=input_max,
            )

    _, synthesis_pyexps = synthesis_transform(
        h_filter_params,
        v_filter_params,
        dwt_depth,
        dwt_depth_ho,
        make_variable_coeff_arrays(dwt_depth, dwt_depth_ho),
    )

    # Re-add results for interleaved/renamed entries
    analysis_test_pattern_outputs = add_missing_analysis_values(
        h_filter_params,
        v_filter_params,
        dwt_depth,
        dwt_depth_ho,
        analysis_test_pattern_outputs,
    )

    synthesis_test_pattern_outputs = OrderedDict()
    for i, ((level, array_name, x, y),
            test_pattern) in enumerate(synthesis_test_patterns.items()):
        logger.info(
            "Evaluating synthesis test pattern %d of %d (Level %d, %s[%d, %d])...",
            i + 1,
            len(synthesis_test_patterns),
            level,
            array_name,
            x,
            y,
        )
        synthesis_test_pattern_outputs[(
            level, array_name, x, y)] = evaluate_synthesis_test_pattern_output(
                h_filter_params=h_filter_params,
                v_filter_params=v_filter_params,
                dwt_depth=dwt_depth,
                dwt_depth_ho=dwt_depth_ho,
                quantisation_matrix=quantisation_matrix,
                synthesis_pyexp=synthesis_pyexps[(
                    level, array_name)][test_pattern.target],
                test_pattern_specification=test_pattern,
                input_min=input_min,
                input_max=input_max,
                max_quantisation_index=max_quantisation_index,
            )

    # Re-add results for interleaved/renamed entries
    synthesis_test_pattern_outputs = add_missing_synthesis_values(
        h_filter_params,
        v_filter_params,
        dwt_depth,
        dwt_depth_ho,
        synthesis_test_pattern_outputs,
    )

    return (
        analysis_test_pattern_outputs,
        synthesis_test_pattern_outputs,
    )
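
A minimal usage sketch, following the same chain of helper calls as Example #16: static analysis produces the test patterns, the concrete bounds give a maximum quantisation index, and this function then evaluates every pattern. Module paths in the imports are assumptions based on the cross-references in the docstring above.

# Illustrative only: module paths are assumed, not confirmed by this file.
from vc2_data_tables import WaveletFilters
from vc2_bit_widths.helpers import (
    static_filter_analysis,
    evaluate_filter_bounds,
    quantisation_index_bound,
    evaluate_test_pattern_outputs,
)

wavelet_index = WaveletFilters.haar_with_shift
wavelet_index_ho = WaveletFilters.le_gall_5_3
dwt_depth = 1
dwt_depth_ho = 0
picture_bit_width = 10
quantisation_matrix = {
    0: {"LL": 0},
    1: {"LH": 1, "HL": 2, "HH": 3},
}

# Heuristic test patterns and algebraic signal bounds for every filter phase
(
    analysis_signal_bounds,
    synthesis_signal_bounds,
    analysis_test_patterns,
    synthesis_test_patterns,
) = static_filter_analysis(
    wavelet_index, wavelet_index_ho, dwt_depth, dwt_depth_ho)

# Concrete bounds for a 10-bit picture, then the largest quantisation index
# worth testing
concrete_analysis_bounds, concrete_synthesis_bounds = evaluate_filter_bounds(
    wavelet_index, wavelet_index_ho, dwt_depth, dwt_depth_ho,
    analysis_signal_bounds, synthesis_signal_bounds, picture_bit_width,
)
max_quantisation_index = quantisation_index_bound(
    concrete_analysis_bounds, quantisation_matrix,
)

# Actual signal levels produced when the test patterns are passed through a
# real encoder/decoder
analysis_outputs, synthesis_outputs = evaluate_test_pattern_outputs(
    wavelet_index, wavelet_index_ho, dwt_depth, dwt_depth_ho,
    picture_bit_width, quantisation_matrix, max_quantisation_index,
    analysis_test_patterns, synthesis_test_patterns,
)

# Each synthesis entry is ((lower_bound, qi), (upper_bound, qi))
for (level, array_name, x, y), bounds in synthesis_outputs.items():
    (lower, lower_qi), (upper, upper_qi) = bounds
    print(level, array_name, x, y, lower, lower_qi, upper, upper_qi)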
Example #8
def optimise_synthesis_test_patterns(
    wavelet_index,
    wavelet_index_ho,
    dwt_depth,
    dwt_depth_ho,
    quantisation_matrix,
    picture_bit_width,
    synthesis_test_patterns,
    max_quantisation_index,
    random_state,
    number_of_searches,
    terminate_early,
    added_corruption_rate,
    removed_corruption_rate,
    base_iterations,
    added_iterations_per_improvement,
):
    """
    Perform a greedy search based optimisation of a complete set of synthesis
    test patterns.
    
    See :ref:`optimisation` for details of the optimisation process and
    parameters.
    
    Parameters
    ==========
    wavelet_index : :py:class:`vc2_data_tables.WaveletFilters` or int
    wavelet_index_ho : :py:class:`vc2_data_tables.WaveletFilters` or int
    dwt_depth : int
    dwt_depth_ho : int
        The filter parameters.
    quantisation_matrix : {level: {orient: value, ...}, ...}
        The quantisation matrix in use.
    picture_bit_width : int
        The number of bits in the input pictures.
    synthesis_test_patterns : {(level, array_name, x, y): :py:class:`~vc2_bit_widths.patterns.TestPatternSpecification`, ...}
        Synthesis test patterns to use as the starting point for optimisation,
        as produced by e.g.  :py:func:`static_filter_analysis`.
    max_quantisation_index : int
        The maximum quantisation index to use, e.g. computed using
        :py:func:`quantisation_index_bound`.
    random_state : :py:class:`numpy.random.RandomState`
        The random number generator to use for the search.
    number_of_searches : int
        Repeat the greedy stochastic search process this many times for each
        test pattern. Since searches will tend to converge on local minima,
        increasing this parameter will tend to produce improved results.
    terminate_early : None or int
        If an integer, stop searching if the first ``terminate_early`` searches
        fail to find an improvement. If None, always performs all searches.
    added_corruption_rate : float
        The proportion of pixels to assign with a random value during each
        search attempt (0.0-1.0).
    removed_corruption_rate : float
        The proportion of pixels to reset to their starting value during each
        search attempt (0.0-1.0).
    base_iterations : int
        The initial number of search iterations to perform in each attempt.
    added_iterations_per_improvement : int
        The number of additional search iterations to perform whenever an
        improved picture is found.
    
    Returns
    =======
    optimised_test_patterns : {(level, array_name, x, y): :py:class:`~vc2_bit_widths.patterns.OptimisedTestPatternSpecification`, ...}
        The optimised test patterns.
        
        Note that entries are omitted for arrays which are just interleavings of
        other arrays.
    """
    # Create PyExps for all synthesis filtering stages, used to decode
    # encoded test patterns
    h_filter_params = LIFTING_FILTERS[wavelet_index_ho]
    v_filter_params = LIFTING_FILTERS[wavelet_index]
    _, synthesis_pyexps = synthesis_transform(
        h_filter_params,
        v_filter_params,
        dwt_depth,
        dwt_depth_ho,
        make_variable_coeff_arrays(dwt_depth, dwt_depth_ho),
    )

    # Strip out all arrays which are simply interleavings of others (and
    # therefore don't need optimising several times)
    test_patterns_to_optimise = [
        (level, array_name, x, y, tp)
        for (level, array_name, x, y), tp in synthesis_test_patterns.items()
        if not synthesis_pyexps[(level, array_name)].nop
    ]

    input_min, input_max = signed_integer_range(picture_bit_width)

    optimised_test_patterns = OrderedDict()

    for signal_no, (level, array_name, x, y,
                    tp) in enumerate(test_patterns_to_optimise):
        synthesis_pyexp = synthesis_pyexps[(level, array_name)][tp.target]

        added_corruptions_per_iteration = int(
            np.ceil(len(tp.pattern) * added_corruption_rate))
        removed_corruptions_per_iteration = int(
            np.ceil(len(tp.pattern) * removed_corruption_rate))

        logger.info(
            "Optimising test pattern %d of %d (level %d, %s[%d, %d])",
            signal_no + 1,
            len(test_patterns_to_optimise),
            level,
            array_name,
            x,
            y,
        )

        best_ts = None

        for flip_polarity, log_message in [
            (False, "Maximising..."),
            (True, "Minimising..."),
        ]:
            logger.info(log_message)

            # Run the search starting from the maximising and minimising signal
            if flip_polarity:
                flipped_tp = invert_test_pattern_specification(tp)
            else:
                flipped_tp = tp

            new_ts = optimise_synthesis_maximising_test_pattern(
                h_filter_params=h_filter_params,
                v_filter_params=v_filter_params,
                dwt_depth=dwt_depth,
                dwt_depth_ho=dwt_depth_ho,
                quantisation_matrix=quantisation_matrix,
                synthesis_pyexp=synthesis_pyexp,
                test_pattern_specification=flipped_tp,
                input_min=input_min,
                input_max=input_max,
                max_quantisation_index=max_quantisation_index,
                random_state=random_state,
                number_of_searches=number_of_searches,
                terminate_early=terminate_early,
                added_corruptions_per_iteration=added_corruptions_per_iteration,
                removed_corruptions_per_iteration=
                removed_corruptions_per_iteration,
                base_iterations=base_iterations,
                added_iterations_per_improvement=
                added_iterations_per_improvement,
            )

            # NB: when given a -ve and +ve value with equal magnitude, the +ve one
            # should be kept because it may require an additional bit to
            # represent in two's complement arithmetic (e.g. -512 fits in 10 bits,
            # +512 needs 11 bits)
            if (best_ts is None
                    or abs(new_ts.decoded_value) > abs(best_ts.decoded_value)
                    or (abs(new_ts.decoded_value) == abs(best_ts.decoded_value)
                        and new_ts.decoded_value > best_ts.decoded_value)):
                best_ts = new_ts

        logger.info(
            "Largest signal magnitude achieved = %d (qi=%d)",
            best_ts.decoded_value,
            best_ts.quantisation_index,
        )

        optimised_test_patterns[(level, array_name, x, y)] = best_ts

    return optimised_test_patterns
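
A sketch of how the search parameters might be filled in, reusing the filter parameters, quantisation matrix, synthesis_test_patterns and max_quantisation_index from the usage sketch after Example #7. The specific parameter values below are purely illustrative, not recommendations.

import numpy as np

optimised_patterns = optimise_synthesis_test_patterns(
    wavelet_index=wavelet_index,
    wavelet_index_ho=wavelet_index_ho,
    dwt_depth=dwt_depth,
    dwt_depth_ho=dwt_depth_ho,
    quantisation_matrix=quantisation_matrix,
    picture_bit_width=picture_bit_width,
    synthesis_test_patterns=synthesis_test_patterns,
    max_quantisation_index=max_quantisation_index,
    random_state=np.random.RandomState(1),
    number_of_searches=3,        # repeat the greedy search 3 times per pattern
    terminate_early=None,        # always run all searches
    added_corruption_rate=0.05,  # randomise 5% of pixels per attempt
    removed_corruption_rate=0.0,
    base_iterations=100,
    added_iterations_per_improvement=50,
)

# Each entry is an OptimisedTestPatternSpecification recording the decoded
# value and quantisation index the optimised pattern achieves.
for (level, array_name, x, y), ts in optimised_patterns.items():
    print(level, array_name, x, y, ts.decoded_value, ts.quantisation_index)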
Example #9
def static_filter_analysis(
    wavelet_index,
    wavelet_index_ho,
    dwt_depth,
    dwt_depth_ho,
    num_batches=1,
    batch_num=0,
):
    r"""
    Performs a complete static analysis of a VC-2 filter configuration,
    computing theoretical upper- and lower-bounds for signal values (see
    :ref:`theory-affine-arithmetic`) and heuristic test patterns (see
    :ref:`theory-test-patterns`) for all intermediate and final analysis and
    synthesis filter values.
    
    Parameters
    ==========
    wavelet_index : :py:class:`vc2_data_tables.WaveletFilters` or int
    wavelet_index_ho : :py:class:`vc2_data_tables.WaveletFilters` or int
    dwt_depth : int
    dwt_depth_ho : int
        The filter parameters.
    
    num_batches : int
    batch_num : int
        Though for most filters this function runs either instantaneously or at
        worst in the space of a couple of hours, unusually large filters can
        take an extremely long time to run. For example, a 4-level Fidelity
        transform may take around a month to evaluate.
        
        These arguments may be used to split this job into separate batches
        which may be computed separately (and in parallel) and later combined.
        For example, setting ``num_batches`` to 3 results in only analysing
        every third filter phase. The ``batch_num`` parameter should then be
        set to either 0, 1 or 2 to specify which third.
        
        The skipped phases are simply omitted from the returned dictionaries.
        The dictionaries returned for each batch should be unified to produce
        the complete analysis.
    
    Returns
    =======
    analysis_signal_bounds : {(level, array_name, x, y): (lower_bound_exp, upper_bound_exp), ...}
    synthesis_signal_bounds : {(level, array_name, x, y): (lower_bound_exp, upper_bound_exp), ...}
        Expressions defining the upper and lower bounds for all intermediate
        and final analysis and synthesis filter values.
        
        The keys of the returned dictionaries give the level, array name and
        filter phase for which each pair of bounds corresponds (see
        :ref:`terminology`). The naming
        conventions used are those defined by
        :py:func:`vc2_bit_widths.vc2_filters.analysis_transform` and
        :py:func:`vc2_bit_widths.vc2_filters.synthesis_transform`. Arrays which
        are just interleavings, subsamplings or renamings of other arrays are
        omitted.
        
        The lower and upper bounds are given algebraically as
        :py:class:`~vc2_bit_widths.linexp.LinExp`\ s.
        
        For the analysis filter bounds, the expressions are defined in terms of
        the variables ``LinExp("signal_min")`` and ``LinExp("signal_max")``.
        These should be substituted for the minimum and maximum picture signal
        level to find the upper and lower bounds for a particular picture bit
        width.
        
        For the synthesis filter bounds, the expressions are defined in terms
        of variables of the form ``LinExp("coeff_LEVEL_ORIENT_min")`` and
        ``LinExp("coeff_LEVEL_ORIENT_max")`` which give lower and upper bounds
        for the transform coefficients with the named level and orientation.
        
        The :py:func:`~vc2_bit_widths.helpers.evaluate_filter_bounds` function
        may be used to substitute concrete values into these expressions for a
        particular picture bit width.
        
    analysis_test_patterns : {(level, array_name, x, y): :py:class:`~vc2_bit_widths.patterns.TestPatternSpecification`, ...}
    synthesis_test_patterns : {(level, array_name, x, y): :py:class:`~vc2_bit_widths.patterns.TestPatternSpecification`, ...}
        Heuristic test patterns which are designed to maximise a particular
        intermediate or final filter value. For a minimising test pattern,
        invert the polarities of the pixels.
        
        The keys of the returned dictionaries give the level, array name and
        filter phase for which each set of bounds corresponds (see
        :ref:`terminology`). Arrays which are just interleavings, subsamplings
        or renamings of other arrays are omitted.
    """
    v_filter_params = LIFTING_FILTERS[wavelet_index]
    h_filter_params = LIFTING_FILTERS[wavelet_index_ho]

    # Create the algebraic representation of the analysis transform
    picture_array = SymbolArray(2)
    analysis_coeff_arrays, intermediate_analysis_arrays = analysis_transform(
        h_filter_params,
        v_filter_params,
        dwt_depth,
        dwt_depth_ho,
        picture_array,
    )

    # Count the total number of arrays for use in logging messages
    num_arrays = sum(array.period[0] * array.period[1]
                     for array in intermediate_analysis_arrays.values()
                     if not array.nop)
    array_num = 0

    # Compute bounds/test pattern for every intermediate/output analysis value
    analysis_signal_bounds = OrderedDict()
    analysis_test_patterns = OrderedDict()
    for (level,
         array_name), target_array in intermediate_analysis_arrays.items():
        # Skip arrays which are just views of other arrays
        if target_array.nop:
            continue

        for x in range(target_array.period[0]):
            for y in range(target_array.period[1]):
                array_num += 1
                if (array_num - 1) % num_batches != batch_num:
                    continue

                logger.info(
                    "Analysing analysis filter %d of %d (level %d, %s[%d, %d])",
                    array_num,
                    num_arrays,
                    level,
                    array_name,
                    x,
                    y,
                )

                # Compute signal bounds
                analysis_signal_bounds[(level, array_name, x,
                                        y)] = analysis_filter_bounds(
                                            target_array[x, y])

                # Generate test pattern
                analysis_test_patterns[(level, array_name, x,
                                        y)] = make_analysis_maximising_pattern(
                                            picture_array,
                                            target_array,
                                            x,
                                            y,
                                        )

    # Create the algebraic representation of the synthesis transform
    coeff_arrays = make_symbol_coeff_arrays(dwt_depth, dwt_depth_ho)
    synthesis_output_array, intermediate_synthesis_arrays = synthesis_transform(
        h_filter_params,
        v_filter_params,
        dwt_depth,
        dwt_depth_ho,
        coeff_arrays,
    )

    # Create a view of the analysis coefficient arrays which avoids recomputing
    # already-known analysis filter phases
    cached_analysis_coeff_arrays = {
        level: {
            orient: SymbolicPeriodicCachingArray(array, picture_array)
            for orient, array in orients.items()
        }
        for level, orients in analysis_coeff_arrays.items()
    }

    # Count the total number of arrays for use in logging messages
    num_arrays = sum(array.period[0] * array.period[1]
                     for array in intermediate_synthesis_arrays.values()
                     if not array.nop)
    array_num = 0

    # Compute bounds/test pattern for every intermediate/output synthesis value
    synthesis_signal_bounds = OrderedDict()
    synthesis_test_patterns = OrderedDict()
    for (level,
         array_name), target_array in intermediate_synthesis_arrays.items():
        # Skip arrays which are just views of other arrays
        if target_array.nop:
            continue

        for x in range(target_array.period[0]):
            for y in range(target_array.period[1]):
                array_num += 1
                if (array_num - 1) % num_batches != batch_num:
                    continue

                logger.info(
                    "Analysing synthesis filter %d of %d (level %d, %s[%d, %d])",
                    array_num,
                    num_arrays,
                    level,
                    array_name,
                    x,
                    y,
                )

                # Compute signal bounds
                synthesis_signal_bounds[(level, array_name, x,
                                         y)] = synthesis_filter_bounds(
                                             target_array[x, y])

                # Compute test pattern
                synthesis_test_patterns[(
                    level, array_name, x,
                    y)] = make_synthesis_maximising_pattern(
                        picture_array,
                        cached_analysis_coeff_arrays,
                        target_array,
                        synthesis_output_array,
                        x,
                        y,
                    )

                # For extremely large filters, a significant amount of RAM can
                # be saved by not caching synthesis filters. These filters
                # generally gain little runtime benefit from caching, so
                # clearing the cache here has essentially no performance cost.
                for a in intermediate_synthesis_arrays.values():
                    a.clear_cache()

    return (
        analysis_signal_bounds,
        synthesis_signal_bounds,
        analysis_test_patterns,
        synthesis_test_patterns,
    )
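
The num_batches/batch_num arguments described in the docstring above split the phase-by-phase analysis into independent jobs. A sketch of running the batches serially and merging the partial results, reusing the filter parameters from the surrounding examples (in practice each batch would typically run as a separate process):

num_batches = 3

analysis_signal_bounds = {}
synthesis_signal_bounds = {}
analysis_test_patterns = {}
synthesis_test_patterns = {}

for batch_num in range(num_batches):
    # Each batch analyses every third filter phase; skipped phases are simply
    # absent from the returned dictionaries.
    a_bounds, s_bounds, a_patterns, s_patterns = static_filter_analysis(
        wavelet_index,
        wavelet_index_ho,
        dwt_depth,
        dwt_depth_ho,
        num_batches=num_batches,
        batch_num=batch_num,
    )
    analysis_signal_bounds.update(a_bounds)
    synthesis_signal_bounds.update(s_bounds)
    analysis_test_patterns.update(a_patterns)
    synthesis_test_patterns.update(s_patterns)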
def test_fast_partial_analyse_quantise_synthesise(wavelet_index, wavelet_index_ho):
    # For this test we compare the output of the pseudocode encoder with the
    # partial decoder and check that they agree. To simplify this test, the
    # Haar transform is used, which is free from edge effects, and therefore
    # every pixel decoded by this implementation should exactly match the
    # pseudocode.
    h_filter_params = LIFTING_FILTERS[wavelet_index_ho]
    v_filter_params = LIFTING_FILTERS[wavelet_index]
    dwt_depth = 1
    dwt_depth_ho = 2
    
    # We're using the wrong matrix here (since the default matrices don't
    # include this type of asymmetric filter) but this is unimportant since any
    # matrix will do...
    quantisation_matrix = QUANTISATION_MATRICES[(
        wavelet_index,
        wavelet_index,
        dwt_depth,
        dwt_depth_ho,
    )]
    
    # This should be enough to get to the point where all transform
    # coefficients are zero in this test
    quantisation_indices = list(range(64))
    
    # Create a test image
    width = 16
    height = 4
    rand = np.random.RandomState(1)
    picture = rand.randint(-512, 511, (height, width))
    
    # Encode the picture using the pseudocode
    state = State(
        wavelet_index=wavelet_index,
        wavelet_index_ho=wavelet_index_ho,
        dwt_depth=dwt_depth,
        dwt_depth_ho=dwt_depth_ho,
    )
    pseudocode_coeffs = dwt(state, picture.tolist())
    
    # Quantise/Decode using the pseudocode at each quantisation level in turn
    # to generate 'model answers' for every pixel
    pseudocode_decoded_pictures = []
    for qi in quantisation_indices:
        pseudocode_decoded_pictures.append(idwt(state, {
            level: {
                orient: [
                    [
                        quant_roundtrip(
                            value,
                            max(0, qi - quantisation_matrix[level][orient]),
                        )
                        for value in row
                    ]
                    for row in array
                ]
                for orient, array in orients.items()
            }
            for level, orients in pseudocode_coeffs.items()
        }))
    
    # Create decoder function expressions
    synthesis_expressions, _ = synthesis_transform(
        h_filter_params,
        v_filter_params,
        dwt_depth,
        dwt_depth_ho,
        make_variable_coeff_arrays(dwt_depth, dwt_depth_ho)
    )
    
    # Check decoding of every pixel individually matches the pseudocode at
    # every quantisation level.
    for y in range(height):
        for x in range(width):
            codec = FastPartialAnalyseQuantiseSynthesise(
                h_filter_params,
                v_filter_params,
                dwt_depth,
                dwt_depth_ho,
                quantisation_matrix,
                quantisation_indices,
                synthesis_expressions[x, y],
            )
            decoded_values = codec.analyse_quantise_synthesise(picture.copy())
            for reference_decoded_picture, decoded_value in zip(
                pseudocode_decoded_pictures,
                decoded_values,
            ):
                assert decoded_value == reference_decoded_picture[y][x]
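
A condensed sketch of the decoder-side flow the test above exercises: build the PyExp for one output pixel, wrap it in a FastPartialAnalyseQuantiseSynthesise codec, and read off that pixel's decoded value at each quantisation index (names and call signatures reused from the test above):

# Expressions describing how each output pixel is synthesised from the
# (quantised) transform coefficients.
synthesis_expressions, _ = synthesis_transform(
    h_filter_params,
    v_filter_params,
    dwt_depth,
    dwt_depth_ho,
    make_variable_coeff_arrays(dwt_depth, dwt_depth_ho),
)

# A codec specialised to decode only output pixel (0, 0), at every
# quantisation index in quantisation_indices.
codec = FastPartialAnalyseQuantiseSynthesise(
    h_filter_params,
    v_filter_params,
    dwt_depth,
    dwt_depth_ho,
    quantisation_matrix,
    quantisation_indices,
    synthesis_expressions[0, 0],
)

# NB: the picture argument is mutated, hence the copy. One decoded value is
# produced per quantisation index tried.
decoded_values = codec.analyse_quantise_synthesise(picture.copy())
for qi, decoded_value in zip(quantisation_indices, decoded_values):
    print(qi, decoded_value)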
Example #11
def test_integration():
    # A simple integration test which computes signal bounds for a small
    # transform operation

    filter_params = LIFTING_FILTERS[WaveletFilters.haar_with_shift]
    dwt_depth = 1
    dwt_depth_ho = 1

    input_picture_array = SymbolArray(2)
    analysis_coeff_arrays, analysis_intermediate_values = analysis_transform(
        filter_params,
        filter_params,
        dwt_depth,
        dwt_depth_ho,
        input_picture_array,
    )

    input_coeff_arrays = make_symbol_coeff_arrays(dwt_depth, dwt_depth_ho)
    synthesis_output, synthesis_intermediate_values = synthesis_transform(
        filter_params,
        filter_params,
        dwt_depth,
        dwt_depth_ho,
        input_coeff_arrays,
    )

    signal_min = LinExp("signal_min")
    signal_max = LinExp("signal_max")

    example_range = {signal_min: -512, signal_max: 511}

    # Input signal bounds should be as specified
    assert analysis_filter_bounds(
        analysis_intermediate_values[(2, "Input")][0, 0], ) == (signal_min,
                                                                signal_max)

    # Output of final analysis filter should require a greater bit depth than
    # the input (NB: for the Haar transform it is the high-pass bands which
    # gain the largest signal range)
    analysis_output_lower, analysis_output_upper = analysis_filter_bounds(
        analysis_intermediate_values[(1, "H")][0, 0], )
    assert analysis_output_lower.subs(example_range) < signal_min.subs(
        example_range)
    assert analysis_output_upper.subs(example_range) > signal_max.subs(
        example_range)

    example_coeff_range = {
        "coeff_{}_{}_{}".format(level, orient, minmax):
        maximum_dequantised_magnitude(
            int(round(value.subs(example_range).constant)))
        for level, orients in analysis_coeff_arrays.items()
        for orient, expr in orients.items()
        for minmax, value in zip(["min", "max"], analysis_filter_bounds(expr))
    }

    # Signal range should shrink down by end of synthesis process but should
    # still be larger than the original signal
    final_output_lower, final_output_upper = synthesis_filter_bounds(
        synthesis_output[0, 0])

    assert final_output_upper.subs(
        example_coeff_range) < analysis_output_upper.subs(example_range)
    assert final_output_lower.subs(
        example_coeff_range) > analysis_output_lower.subs(example_range)

    assert final_output_upper.subs(example_coeff_range) > signal_max.subs(
        example_range)
    assert final_output_lower.subs(example_coeff_range) < signal_min.subs(
        example_range)
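
The bounds above remain algebraic LinExps until concrete values are substituted. A small sketch of turning one of them into numbers for a 10-bit signal, using the same .subs()/.constant pattern as the test (names reused from the test above):

# Concrete worst-case bounds for the level-1 "H" analysis array for a 10-bit
# input signal (-512..511).
lower_exp, upper_exp = analysis_filter_bounds(
    analysis_intermediate_values[(1, "H")][0, 0])

ten_bit_range = {LinExp("signal_min"): -512, LinExp("signal_max"): 511}

lower_bound = lower_exp.subs(ten_bit_range).constant
upper_bound = upper_exp.subs(ten_bit_range).constant

# As asserted in the test above, this intermediate value needs a wider range
# (and therefore more bits) than the 10-bit input picture.
assert lower_bound < -512 and upper_bound > 511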
Example #12
def test_add_missing_synthesis_values(
    wavelet_index,
    wavelet_index_ho,
    dwt_depth,
    dwt_depth_ho,
):
    h_filter_params = tables.LIFTING_FILTERS[wavelet_index_ho]
    v_filter_params = tables.LIFTING_FILTERS[wavelet_index]

    _, intermediate_values = synthesis_transform(
        h_filter_params,
        v_filter_params,
        dwt_depth,
        dwt_depth_ho,
        make_symbol_coeff_arrays(dwt_depth, dwt_depth_ho),
    )

    all_expressions = {
        (level, array_name, x, y): array[x, y]
        for (level, array_name), array in intermediate_values.items()
        for x in range(array.period[0]) for y in range(array.period[1])
    }

    non_nop_expressions = {
        (level, array_name, x, y): array[x, y]
        for (level, array_name), array in intermediate_values.items()
        for x in range(array.period[0]) for y in range(array.period[1])
        if not array.nop
    }

    # Sanity check
    assert all_expressions != non_nop_expressions

    refilled_expressions_not_filled = add_missing_synthesis_values(
        h_filter_params,
        v_filter_params,
        dwt_depth,
        dwt_depth_ho,
        non_nop_expressions,
        fill_in_equivalent_phases=False,
    )

    refilled_expressions_filled = add_missing_synthesis_values(
        h_filter_params,
        v_filter_params,
        dwt_depth,
        dwt_depth_ho,
        non_nop_expressions,
        fill_in_equivalent_phases=True,
    )

    assert set(refilled_expressions_not_filled) == set(all_expressions)
    assert set(refilled_expressions_filled) == set(all_expressions)

    for key in all_expressions:
        if refilled_expressions_not_filled[key] is not None:
            # Where a phase hasn't been repeated, should have exactly the same
            # value.
            assert (
                all_expressions[key] == refilled_expressions_not_filled[key])
            assert (all_expressions[key] == refilled_expressions_filled[key])
        else:
            # Where a phase has been filled in, without going to a lot of
            # effort, all we can do is check that the substituted phase is
            # 'similar' (i.e. likely to be the same expression just with
            # different coordinates)
            assert (sorted(
                coeff for sym, coeff in all_expressions[key]) == sorted(
                    coeff for sym, coeff in refilled_expressions_filled[key]))
Example #13
    def test_filters_match_pseudocode(self, wavelet_index, wavelet_index_ho):
        # This test checks that the filters implement the same behaviour as the
        # VC-2 pseudocode, including compatible operation ordering. This test is
        # carried out on a relatively small Haar transform because:
        #
        # * The Haar transform is free from edge effects, making the
        #   InfiniteArray implementation straightforwardly equivalent to the
        #   pseudocode behaviour in all cases (not just non-edge cases)
        # * The Haar transform is available in a form with and without the bit
        #   shift so we can check that the bit shift parameter is used
        #   correctly and taken from the correct wavelet index.
        # * Using large transform depths or filters produces very large
        #   functions for analysis transforms under PyExp which can crash
        #   Python interpreters. (In practice they'll only ever be generated
        #   for synthesis transforms which produce small code even for large
        #   transforms)

        width = 16
        height = 8

        dwt_depth = 1
        dwt_depth_ho = 2

        # Create a random picture to analyse
        rand = np.random.RandomState(1)
        random_input_picture = rand.randint(-512, 511, (height, width))

        # Analyse using pseudocode
        state = State(
            wavelet_index=wavelet_index,
            wavelet_index_ho=wavelet_index_ho,
            dwt_depth=dwt_depth,
            dwt_depth_ho=dwt_depth_ho,
        )
        pseudocode_coeffs = dwt(state, random_input_picture.tolist())

        # Analyse using InfiniteArrays
        h_filter_params = tables.LIFTING_FILTERS[wavelet_index_ho]
        v_filter_params = tables.LIFTING_FILTERS[wavelet_index]
        ia_coeffs, _ = analysis_transform(
            h_filter_params,
            v_filter_params,
            dwt_depth,
            dwt_depth_ho,
            VariableArray(2, Argument("picture")),
        )

        # Compare analysis results
        for level in pseudocode_coeffs:
            for orient in pseudocode_coeffs[level]:
                pseudocode_data = pseudocode_coeffs[level][orient]
                for row, row_data in enumerate(pseudocode_data):
                    for col, pseudocode_value in enumerate(row_data):
                        # Create and call a function to compute this value via
                        # InfiniteArrays/PyExp
                        expr = ia_coeffs[level][orient][col, row]
                        f = expr.make_function()
                        # NB: Array is transposed to support (x, y) indexing
                        ia_value = f(random_input_picture.T)

                        assert ia_value == pseudocode_value

        # Synthesise using pseudocode
        pseudocode_picture = idwt(state, pseudocode_coeffs)

        # Synthesise using InfiniteArrays
        ia_picture, _ = synthesis_transform(
            h_filter_params,
            v_filter_params,
            dwt_depth,
            dwt_depth_ho,
            make_variable_coeff_arrays(dwt_depth, dwt_depth_ho),
        )

        # Create numpy-array based coeff data for use by
        # InfiniteArray-generated functions (NB: arrays are transposed to
        # support (x, y) indexing).
        ia_coeffs_data = {
            level: {
                orient: np.array(array, dtype=np.int64).T
                for orient, array in orients.items()
            }
            for level, orients in pseudocode_coeffs.items()
        }

        # Compare synthesis results

        for row, row_data in enumerate(pseudocode_picture):
            for col, pseudocode_value in enumerate(row_data):
                # Create and call a function to compute this value via
                # InfiniteArrays/PyExp
                expr = ia_picture[col, row]
                f = expr.make_function()
                # NB: Arrays are transposed to support (x, y) indexing
                ia_value = f(ia_coeffs_data)

                assert ia_value == pseudocode_value
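
The PyExp machinery used above compiles the expression for a single pixel into an ordinary Python function. A minimal sketch of that flow for one analysis coefficient, using the same calls and names as the test above:

# Build analysis expressions over a symbolic "picture" argument rather than
# concrete pixel values.
ia_coeffs, _ = analysis_transform(
    h_filter_params,
    v_filter_params,
    dwt_depth,
    dwt_depth_ho,
    VariableArray(2, Argument("picture")),
)

# Pick one transform coefficient from the final (2D) level, compile it into a
# function, then evaluate it against a real picture (transposed so that it is
# indexed as (x, y)).
level = max(ia_coeffs)
expr = ia_coeffs[level]["HH"][0, 0]
f = expr.make_function()
value = f(random_input_picture.T)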
def test_greedy_stochastic_search():
    # This test is fairly crude. It just tests that the search terminates, does
    # not crash and improves on the initial input signal, however slightly.

    wavelet_index = WaveletFilters.le_gall_5_3
    wavelet_index_ho = WaveletFilters.le_gall_5_3
    dwt_depth = 1
    dwt_depth_ho = 0

    h_filter_params = LIFTING_FILTERS[wavelet_index_ho]
    v_filter_params = LIFTING_FILTERS[wavelet_index]

    quantisation_matrix = QUANTISATION_MATRICES[(
        wavelet_index,
        wavelet_index_ho,
        dwt_depth,
        dwt_depth_ho,
    )]

    quantisation_indices = list(range(64))

    input_min = -511
    input_max = 256

    width = 16
    height = 8

    # Arbitrary test pattern
    rand = np.random.RandomState(1)
    input_pattern = rand.choice((input_min, input_max), (height, width))

    # Restrict search-space to bottom-right three quarters only
    search_slice = (slice(height // 4, None), slice(width // 4, None))

    # Arbitrary output pixel
    output_array, intermediate_arrays = synthesis_transform(
        h_filter_params,
        v_filter_params,
        dwt_depth,
        dwt_depth_ho,
        make_variable_coeff_arrays(dwt_depth, dwt_depth_ho),
    )
    synthesis_pyexp = output_array[width // 2, height // 2]

    codec = FastPartialAnalyseQuantiseSynthesise(
        h_filter_params,
        v_filter_params,
        dwt_depth,
        dwt_depth_ho,
        quantisation_matrix,
        quantisation_indices,
        synthesis_pyexp,
    )

    kwargs = {
        "starting_pattern": input_pattern.copy(),
        "search_slice": search_slice,
        "input_min": input_min,
        "input_max": input_max,
        "codec": codec,
        "random_state": np.random.RandomState(1),
        "added_corruptions_per_iteration": 1,
        "removed_corruptions_per_iteration": 1,
        "added_iterations_per_improvement": 50,
    }

    # Get the baseline after no searches performed
    base_input_pattern, base_decoded_value, base_qi, decoded_values = greedy_stochastic_search(
        base_iterations=0, **kwargs)
    assert decoded_values == []

    # Check that when run for some time we get an improved result
    new_input_pattern, new_decoded_value, new_qi, decoded_values = greedy_stochastic_search(
        base_iterations=100, **kwargs)
    assert not np.array_equal(new_input_pattern, base_input_pattern)
    assert abs(new_decoded_value) > abs(base_decoded_value)
    assert new_qi in quantisation_indices
    assert len(decoded_values) > 100
    assert decoded_values[-1] == new_decoded_value

    # Check haven't mutated the supplied starting pattern argument
    assert np.array_equal(kwargs["starting_pattern"], input_pattern)

    # Check that only the specified slice was modified
    before = input_pattern.copy()
    before[search_slice] = 0
    after = new_input_pattern.copy()
    after[search_slice] = 0
    assert np.array_equal(before, after)

    # Check that all values are in range
    assert np.all((new_input_pattern >= input_min)
                  & (new_input_pattern <= input_max))
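
greedy_stochastic_search also returns the decoded value reached after every iteration, which is useful for judging whether the iteration budget was large enough. A short sketch reusing the kwargs dictionary from the test above:

# Run a longer search and inspect how it progressed.
pattern, decoded_value, qi, decoded_values = greedy_stochastic_search(
    base_iterations=200, **kwargs)

print("Best decoded value:", decoded_value, "at qi =", qi)
print("Decoded values recorded:", len(decoded_values))

# The last recorded value is the one the search settled on.
assert decoded_values[-1] == decoded_value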
Example #15
def test_aggregation_flag(tmpdir, capsys, arg, exp_phases):
    # Check that aggregation of filter phases works

    f = str(tmpdir.join("file.json"))

    # vc2-static-filter-analysis
    assert sfa(shlex.split("-w haar_with_shift -d 1 -o") + [f]) == 0

    # vc2-bit-widths-table
    assert bwt([f] + shlex.split("-b 10 {}".format(arg))) == 0

    csv_rows = list(csv.reader(capsys.readouterr().out.splitlines()))

    columns = csv_rows[0][:-5]

    # Check all phase columns are present as expected
    if exp_phases:
        assert columns == ["type", "level", "array_name", "x", "y"]
    else:
        assert columns == ["type", "level", "array_name"]

    # Check the rows are as expected
    row_headers = [tuple(row[:-5]) for row in csv_rows[1:]]

    # ...by comparing with the intermediate arrays expected for this filter...
    h_filter_params = LIFTING_FILTERS[WaveletFilters.haar_with_shift]
    v_filter_params = LIFTING_FILTERS[WaveletFilters.haar_with_shift]
    dwt_depth = 1
    dwt_depth_ho = 0

    _, analysis_intermediate_arrays = analysis_transform(
        h_filter_params,
        v_filter_params,
        dwt_depth,
        dwt_depth_ho,
        SymbolArray(2),
    )
    _, synthesis_intermediate_arrays = synthesis_transform(
        h_filter_params,
        v_filter_params,
        dwt_depth,
        dwt_depth_ho,
        make_symbol_coeff_arrays(dwt_depth, dwt_depth_ho),
    )

    if exp_phases:
        assert row_headers == [
            (type_name, str(level), array_name, str(x), str(y))
            for type_name, intermediate_arrays in [
                ("analysis", analysis_intermediate_arrays),
                ("synthesis", synthesis_intermediate_arrays),
            ] for (level, array_name), array in intermediate_arrays.items()
            for x in range(array.period[0]) for y in range(array.period[1])
        ]
    else:
        assert row_headers == [(type_name, str(level), array_name)
                               for type_name, intermediate_arrays in [
                                   ("analysis", analysis_intermediate_arrays),
                                   ("synthesis",
                                    synthesis_intermediate_arrays),
                               ] for level, array_name in intermediate_arrays]
Example #16
def test_generate_test_pictures():
    wavelet_index = WaveletFilters.haar_with_shift
    wavelet_index_ho = WaveletFilters.le_gall_5_3
    dwt_depth = 1
    dwt_depth_ho = 0

    h_filter_params = LIFTING_FILTERS[wavelet_index_ho]
    v_filter_params = LIFTING_FILTERS[wavelet_index]

    quantisation_matrix = {
        0: {
            "LL": 0
        },
        1: {
            "LH": 1,
            "HL": 2,
            "HH": 3
        },
    }

    picture_width = 16
    picture_height = 8
    picture_bit_width = 10
    value_offset = 1 << (picture_bit_width - 1)

    (
        analysis_signal_bounds,
        synthesis_signal_bounds,
        analysis_test_patterns,
        synthesis_test_patterns,
    ) = static_filter_analysis(
        wavelet_index,
        wavelet_index_ho,
        dwt_depth,
        dwt_depth_ho,
    )

    (
        concrete_analysis_signal_bounds,
        concrete_synthesis_signal_bounds,
    ) = evaluate_filter_bounds(
        wavelet_index,
        wavelet_index_ho,
        dwt_depth,
        dwt_depth_ho,
        analysis_signal_bounds,
        synthesis_signal_bounds,
        picture_bit_width,
    )

    max_quantisation_index = quantisation_index_bound(
        concrete_analysis_signal_bounds,
        quantisation_matrix,
    )

    (
        analysis_test_pattern_outputs,
        synthesis_test_pattern_outputs,
    ) = evaluate_test_pattern_outputs(
        wavelet_index,
        wavelet_index_ho,
        dwt_depth,
        dwt_depth_ho,
        picture_bit_width,
        quantisation_matrix,
        max_quantisation_index,
        analysis_test_patterns,
        synthesis_test_patterns,
    )

    (
        analysis_pictures,
        synthesis_pictures,
    ) = generate_test_pictures(
        picture_width,
        picture_height,
        picture_bit_width,
        analysis_test_patterns,
        synthesis_test_patterns,
        synthesis_test_pattern_outputs,
    )

    # Check all test patterns and maximise/minimise options were included
    assert set((tp.level, tp.array_name, tp.x, tp.y, tp.maximise)
               for p in analysis_pictures for tp in p.test_points) == set(
                   (level, array_name, x, y, maximise)
                   for (level, array_name, x, y) in analysis_test_patterns
                   for maximise in [True, False])
    assert set((tp.level, tp.array_name, tp.x, tp.y, tp.maximise)
               for p in synthesis_pictures for tp in p.test_points) == set(
                   (level, array_name, x, y, maximise)
                   for (level, array_name, x, y) in synthesis_test_patterns
                   for maximise in [True, False])

    # Test analysis pictures do what they claim
    for analysis_picture in analysis_pictures:
        for test_point in analysis_picture.test_points:
            # Perform analysis on the whole test picture, capturing the target
            # value along the way
            target_value = fast_partial_analysis_transform(
                h_filter_params,
                v_filter_params,
                dwt_depth,
                dwt_depth_ho,
                analysis_picture.picture.copy() -
                value_offset,  # NB: Argument is mutated
                (
                    test_point.level,
                    test_point.array_name,
                    test_point.tx,
                    test_point.ty,
                ),
            )

            # Compare with expected output level for that test pattern
            expected_outputs = analysis_test_pattern_outputs[(
                test_point.level,
                test_point.array_name,
                test_point.x,
                test_point.y,
            )]
            if test_point.maximise:
                expected_value = expected_outputs[1]
            else:
                expected_value = expected_outputs[0]

            assert target_value == expected_value

    # PyExps required for synthesis implementation
    _, synthesis_pyexps = synthesis_transform(
        h_filter_params,
        v_filter_params,
        dwt_depth,
        dwt_depth_ho,
        make_variable_coeff_arrays(dwt_depth, dwt_depth_ho),
    )

    # Test synthesis pictures do what they claim
    for synthesis_picture in synthesis_pictures:
        for test_point in synthesis_picture.test_points:
            # Perform analysis, quantisation and synthesis on the whole test
            # picture, capturing just the target synthesis value
            codec = FastPartialAnalyseQuantiseSynthesise(
                h_filter_params,
                v_filter_params,
                dwt_depth,
                dwt_depth_ho,
                quantisation_matrix,
                [synthesis_picture.quantisation_index],
                synthesis_pyexps[(
                    test_point.level,
                    test_point.array_name,
                )][test_point.tx, test_point.ty],
            )
            # NB: Argument is mutated
            target_value = codec.analyse_quantise_synthesise(
                synthesis_picture.picture.copy() - value_offset, )[0]

            # Compare with expected output level for that test pattern
            expected_outputs = synthesis_test_pattern_outputs[(
                test_point.level,
                test_point.array_name,
                test_point.x,
                test_point.y,
            )]
            if test_point.maximise:
                expected_value = expected_outputs[1][0]
            else:
                expected_value = expected_outputs[0][0]

            assert target_value == expected_value
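
The objects returned by generate_test_pictures carry everything needed to drive a real codec: each picture holds the packed pixel data in the offset (unsigned) representation (the test above subtracts value_offset before encoding) plus a list of test points describing what each location targets, and each synthesis picture additionally records the quantisation index to apply. A sketch of walking the result, using only the attributes exercised in the test above:

# Analysis pictures: batches of packed test patterns plus the test points
# describing which filter value each one targets.
for picture in analysis_pictures:
    print("Analysis picture with", len(picture.test_points), "test points")
    for tp in picture.test_points:
        print(
            "  level", tp.level, tp.array_name,
            "phase", (tp.x, tp.y),
            "at picture coordinates", (tp.tx, tp.ty),
            "maximise" if tp.maximise else "minimise",
        )

# Synthesis pictures additionally specify the quantisation index to apply
# between analysis and synthesis.
for picture in synthesis_pictures:
    print("Synthesis picture, qi =", picture.quantisation_index)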