Example No. 1
def h_analysis(state, data):
    """
    Horizontal-only analysis, inverse of h_synthesis (15.4.2).

    Returns a tuple (L_data, H_data)
    """
    # Bit shift, if required
    shift = filter_bit_shift(state)
    if shift > 0:
        for y in range(height(data)):
            for x in range(width(data)):
                data[y][x] <<= shift

    # Analysis
    for y in range(height(data)):
        oned_analysis(row(data, y), state["wavelet_index_ho"])

    # De-interleave the transform data
    L_data = new_array(height(data), width(data) // 2)
    H_data = new_array(height(data), width(data) // 2)
    for y in range(height(data)):
        for x in range(width(data) // 2):
            L_data[y][x] = data[y][2 * x]
            H_data[y][x] = data[y][(2 * x) + 1]

    return (L_data, H_data)
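
The de-interleave at the end of h_analysis is the mirror image of the interleave in h_synthesis (Example No. 6 below). A minimal sketch of just that step, using plain Python lists instead of the new_array helper:

def deinterleave_row(r):
    # Even-indexed samples are low-pass, odd-indexed are high-pass
    return r[0::2], r[1::2]

def interleave_row(L_row, H_row):
    # Recombine as h_synthesis expects
    out = [0] * (2 * len(L_row))
    out[0::2] = L_row
    out[1::2] = H_row
    return out

r = [10, 1, 20, 2, 30, 3, 40, 4]
assert interleave_row(*deinterleave_row(r)) == r
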
Example No. 2
def vh_synthesis(state, LL_data, HL_data, LH_data, HH_data):
    """(15.4.3) Interleaved vertical and horizontal synthesis."""
    synth = new_array(2 * height(LL_data), 2 * width(LL_data))

    # Interleave transform data (as expected by synthesis routine)
    for y in range(height(synth) // 2):
        for x in range(width(synth) // 2):
            synth[2 * y][2 * x] = LL_data[y][x]
            synth[2 * y][2 * x + 1] = HL_data[y][x]
            synth[2 * y + 1][2 * x] = LH_data[y][x]
            synth[2 * y + 1][2 * x + 1] = HH_data[y][x]

    # Synthesis
    for x in range(width(synth)):
        oned_synthesis(column(synth, x), state["wavelet_index"])
    for y in range(height(synth)):
        oned_synthesis(row(synth, y), state["wavelet_index_ho"])

    # Bit shift, if required
    shift = filter_bit_shift(state)
    if shift > 0:
        for y in range(height(synth)):
            for x in range(width(synth)):
                synth[y][x] = (synth[y][x] + (1 << (shift - 1))) >> shift

    return synth
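
The final shift implements division by 2 ** shift rounded to nearest (ties towards positive infinity, since Python's >> floors): adding 1 << (shift - 1) before shifting supplies the rounding offset. A quick check:

shift = 2  # divide by 4 with rounding
for v in [5, 6, 7, -5]:
    print(v, (v + (1 << (shift - 1))) >> shift)
# 5 -> 1 (1.25), 6 -> 2 (1.5 rounds up), 7 -> 2 (1.75), -5 -> -1 (-1.25)
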
Example No. 3
def two_d_array():
    """A 2D array of numbers for test purposes."""
    a = new_array(16, 32)
    for y in range(height(a)):
        for x in range(width(a)):
            a[y][x] = (y * 100) + x
    return a
Example No. 4
def offset_component(state, comp_data, c):
    """(15.5) Remove picture value offsets from a single component."""
    for y in range(height(comp_data)):
        for x in range(width(comp_data)):
            if c == "Y":
                comp_data[y][x] += 2 ** (state["luma_depth"] - 1)
            else:
                comp_data[y][x] += 2 ** (state["color_diff_depth"] - 1)
Example No. 5
def vh_analysis(state, data):
    """
    Interleaved vertical and horizontal analysis, inverse of vh_synthesis (15.4.3).

    Returns a tuple (LL_data, HL_data, LH_data, HH_data)
    """
    # Bit shift, if required
    shift = filter_bit_shift(state)
    if shift > 0:
        for y in range(height(data)):
            for x in range(width(data)):
                data[y][x] <<= shift

    # Analysis
    for y in range(height(data)):
        oned_analysis(row(data, y), state["wavelet_index_ho"])
    for x in range(width(data)):
        oned_analysis(column(data, x), state["wavelet_index"])

    # De-interleave the transform data
    LL_data = new_array(height(data) // 2, width(data) // 2)
    HL_data = new_array(height(data) // 2, width(data) // 2)
    LH_data = new_array(height(data) // 2, width(data) // 2)
    HH_data = new_array(height(data) // 2, width(data) // 2)
    for y in range(height(data) // 2):
        for x in range(width(data) // 2):
            LL_data[y][x] = data[2 * y][2 * x]
            HL_data[y][x] = data[2 * y][2 * x + 1]
            LH_data[y][x] = data[2 * y + 1][2 * x]
            HH_data[y][x] = data[2 * y + 1][2 * x + 1]

    return (LL_data, HL_data, LH_data, HH_data)
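
The 2D de-interleave splits the filtered array into the four quadrature subbands; it is the exact mirror of the interleave in vh_synthesis (Example No. 2). A self-contained round-trip sketch on plain lists:

def deinterleave_2d(data):
    LL = [r[0::2] for r in data[0::2]]  # even rows, even columns
    HL = [r[1::2] for r in data[0::2]]  # even rows, odd columns
    LH = [r[0::2] for r in data[1::2]]  # odd rows, even columns
    HH = [r[1::2] for r in data[1::2]]  # odd rows, odd columns
    return LL, HL, LH, HH

def interleave_2d(LL, HL, LH, HH):
    h, w = 2 * len(LL), 2 * len(LL[0])
    out = [[0] * w for _ in range(h)]
    for y in range(h // 2):
        for x in range(w // 2):
            out[2 * y][2 * x] = LL[y][x]
            out[2 * y][2 * x + 1] = HL[y][x]
            out[2 * y + 1][2 * x] = LH[y][x]
            out[2 * y + 1][2 * x + 1] = HH[y][x]
    return out

data = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
assert interleave_2d(*deinterleave_2d(data)) == data
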
Example No. 6
def h_synthesis(state, L_data, H_data):
    """(15.4.2) Horizontal-only synthesis."""
    synth = new_array(height(L_data), 2 * width(L_data))

    # Interleave transform data (as expected by synthesis routine)
    for y in range(height(synth)):
        for x in range(width(synth) // 2):
            synth[y][2 * x] = L_data[y][x]
            synth[y][(2 * x) + 1] = H_data[y][x]

    # Synthesis
    for y in range(height(synth)):
        oned_synthesis(row(synth, y), state["wavelet_index_ho"])

    # Bit shift, if required
    shift = filter_bit_shift(state)
    if shift > 0:
        for y in range(height(synth)):
            for x in range(width(synth)):
                synth[y][x] = (synth[y][x] + (1 << (shift - 1))) >> shift

    return synth
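
Note that the left shift applied during analysis (Example No. 1) and the rounded right shift here are exact inverses when nothing disturbs the values in between; it is quantization that makes the overall pipeline lossy. The identity holds for any integer:

shift = 2
for v in [-3, 0, 7, 100]:
    assert ((v << shift) + (1 << (shift - 1))) >> shift == v
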
Example No. 7
def apply_dc_prediction(band):
    """(13.4) Inverse of dc_prediction."""
    for y in reversed(range(height(band))):
        for x in reversed(range(width(band))):
            if x > 0 and y > 0:
                prediction = mean(band[y][x - 1], band[y - 1][x - 1],
                                  band[y - 1][x])
            elif x > 0 and y == 0:
                prediction = band[0][x - 1]
            elif x == 0 and y > 0:
                prediction = band[y - 1][0]
            else:
                prediction = 0
            band[y][x] -= prediction
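
A round-trip check against a sketch of the decoder-side dc_prediction (13.4), which visits coefficients in forward raster order and adds each prediction back. The mean stand-in here is a plausible rounding integer mean; the spec's exact tie-breaking rule may differ, but the round trip holds as long as both directions use the same mean. height/width stand-ins make the snippet self-contained:

def height(a): return len(a)
def width(a): return len(a[0]) if a else 0

def mean(*vals):
    # Hypothetical stand-in: integer mean with rounding
    return (sum(vals) + len(vals) // 2) // len(vals)

def dc_prediction(band):
    # Sketch of the decoder side: forward raster order, adding predictions
    for y in range(height(band)):
        for x in range(width(band)):
            if x > 0 and y > 0:
                p = mean(band[y][x - 1], band[y - 1][x - 1], band[y - 1][x])
            elif x > 0:
                p = band[0][x - 1]
            elif y > 0:
                p = band[y - 1][0]
            else:
                p = 0
            band[y][x] += p

band = [[3, 1, 4], [1, 5, 9], [2, 6, 5]]
original = [r[:] for r in band]
apply_dc_prediction(band)  # encoder side (above) subtracts predictions
dc_prediction(band)        # decoder side restores them
assert band == original
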
Example No. 8
def clip_component(state, comp_data, c):
    """(15.5)"""
    for y in range(height(comp_data)):
        for x in range(width(comp_data)):
            if c == "Y":
                comp_data[y][x] = clip(
                    comp_data[y][x],
                    -(2**(state["luma_depth"] - 1)),
                    2**(state["luma_depth"] - 1) - 1,
                )
            else:
                comp_data[y][x] = clip(
                    comp_data[y][x],
                    -(2**(state["color_diff_depth"] - 1)),
                    2**(state["color_diff_depth"] - 1) - 1,
                )
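
Worked example with a hypothetical depth: for luma_depth = 10 the legal signed range is [-512, 511], so out-of-range values saturate at those limits. clip here is a stand-in with the spec's (value, lower, upper) argument order:

def clip(a, b, t):
    # Stand-in: a clamped to the range [b, t]
    return min(max(a, b), t)

assert clip(600, -(2 ** 9), 2 ** 9 - 1) == 511
assert clip(-700, -(2 ** 9), 2 ** 9 - 1) == -512
assert clip(42, -(2 ** 9), 2 ** 9 - 1) == 42
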
Example No. 9
def remove_offset_component(state, comp_data, c):
    """
    Inverse of offset_component (15.5). Centers picture values around zero.

    Parameters
    ==========
    state : :py:class:`vc2_conformance.pseudocode.state.State`
        Where ``luma_depth`` and ``color_diff_depth`` are defined.
    comp_data : [[pixel_value, ...], ...]
        The single component's sample array; will be mutated in-place.
    c : str
        ``"Y"`` for luma; any other value selects the color difference depth.
    """
    for y in range(height(comp_data)):
        for x in range(width(comp_data)):
            if c == "Y":
                comp_data[y][x] -= 2 ** (state["luma_depth"] - 1)
            else:
                comp_data[y][x] -= 2 ** (state["color_diff_depth"] - 1)
Example No. 10
def num_array():
    """A small 2D array of distinct numbers for test purposes."""
    a = new_array(3, 5)
    for x in range(width(a)):
        for y in range(height(a)):
            a[y][x] = x + (y * 10)
    return a
Example No. 11
def test_width():
    assert width(new_array(0, 0)) == 0
    assert width(new_array(20, 10)) == 10
Example No. 12
def make_transform_data_ld_lossy(picture_bytes,
                                 transform_coeffs,
                                 minimum_qindex=0):
    """
    Quantize and pack transform coefficients into LD picture slices in a
    :py:class:`TransformData`.

    Raises :py:exc:`InsufficientLDPictureBytesError` if ``picture_bytes`` is too
    small.

    Parameters
    ==========
    picture_bytes : int
        The total number of bytes the picture slices should take up (i.e.
        including qindex and slice_y_length fields). Must be at least 1 byte
        per slice.
    transform_coeffs : [[:py:class:`SliceCoeffs`, ...], ...]
    minimum_qindex : int
        If provided, gives the quantization index to start with when trying to
        find a suitable quantization index.

    Returns
    =======
    transform_data : :py:class:`vc2_conformance.bitstream.TransformData`
    """
    # Used by slice_bytes
    state = State(
        slices_x=width(transform_coeffs),
        slices_y=height(transform_coeffs),
        slice_bytes_numerator=picture_bytes,
        slice_bytes_denominator=(width(transform_coeffs) *
                                 height(transform_coeffs)),
    )

    transform_data = TransformData(ld_slices=[])
    for sy, transform_coeffs_row in enumerate(transform_coeffs):
        for sx, transform_coeffs_slice in enumerate(transform_coeffs_row):
            target_size = 8 * slice_bytes(state, sx, sy)
            target_size -= 7  # qindex field
            target_size -= intlog2(target_size)  # slice_y_length field

            if target_size < 0:
                raise InsufficientLDPictureBytesError()

            # Interleave color components
            y_coeffs = transform_coeffs_slice.Y
            c_coeffs = ComponentCoeffs(
                coeff_values=interleave(
                    transform_coeffs_slice.C1.coeff_values,
                    transform_coeffs_slice.C2.coeff_values,
                ),
                quant_matrix_values=interleave(
                    transform_coeffs_slice.C1.quant_matrix_values,
                    transform_coeffs_slice.C2.quant_matrix_values,
                ),
            )

            # Quantize each slice to fit
            qindex, (y_transform, c_transform) = quantize_to_fit(
                target_size,
                [y_coeffs, c_coeffs],
                minimum_qindex=minimum_qindex,
            )
            transform_data["ld_slices"].append(
                make_ld_slice(
                    y_transform,
                    c_transform,
                    qindex,
                ))

    return transform_data
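
The per-slice budget arithmetic above can be checked by hand. For a hypothetical 16-byte LD slice there are 128 bits in total, minus the 7-bit qindex field, minus an intlog2-sized slice_y_length field. The intlog2 below is a stand-in assuming the spec's ceil(log2(n)) definition:

from math import ceil, log2

def intlog2(n):
    # Stand-in, assuming intlog2(n) == ceil(log2(n))
    return int(ceil(log2(n)))

target_size = 8 * 16                  # a hypothetical 16-byte slice
target_size -= 7                      # qindex field
target_size -= intlog2(target_size)   # slice_y_length: intlog2(121) == 7
assert target_size == 114             # bits left for quantized coefficients
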
Example No. 13
def make_transform_data_hq_lossy(picture_bytes,
                                 transform_coeffs,
                                 minimum_qindex=0,
                                 minimum_slice_size_scaler=1):
    """
    Quantize and pack transform coefficients into HQ picture slices in a
    :py:class:`TransformData`.

    Raises :py:exc:`InsufficientHQPictureBytesError` if ``picture_bytes`` is too
    small.

    Parameters
    ==========
    picture_bytes : int
        The total number of bytes the picture slices should take up (i.e.
        including ``qindex`` and ``slice_{y,c1,c2}_length`` fields). Must allow
        at least 4 bytes per slice. When slice sizes are large enough to
        require a slice_size_scaler larger than 1, ``picture_bytes -
        4*num_slices`` must be a multiple of ``slice_size_scaler``, otherwise
        the total picture size will deviate by up to ``slice_size_scaler``
        bytes from ``picture_bytes``.
    transform_coeffs : [[:py:class:`SliceCoeffs`, ...], ...]
    minimum_qindex : int
        If provided, gives the quantization index to start with when trying to
        find a suitable quantization index.
    minimum_slice_size_scaler : int
        Specifies the minimum slice_size_scaler to be used for high quality
        pictures. Ignored in low delay mode.

    Returns
    =======
    slice_size_scaler : int
    transform_data : :py:class:`vc2_conformance.bitstream.TransformData`
    """
    slices_x = width(transform_coeffs)
    slices_y = height(transform_coeffs)

    num_slices = slices_x * slices_y

    # Find the smallest slice_size_scaler which lets the largest possible
    # slice length fit into an 8-bit length field
    slice_size_scaler = max(
        get_safe_lossy_hq_slice_size_scaler(picture_bytes, num_slices),
        minimum_slice_size_scaler,
    )

    # Work out the total bitstream space available after slice overheads are
    # accounted for (NB: 4 bytes overhead per slice due to qindex and
    # slice_{y,c1,c2}_length fields).
    total_coeff_bytes = picture_bytes - (num_slices * 4)

    if total_coeff_bytes < 0:
        raise InsufficientHQPictureBytesError()

    # Repurpose slice_bytes (13.5.3.2): with these parameters it computes not
    # the size of each slice in bytes but the number of slice_size_scaler-byte
    # units available for transform coefficients in each slice.
    state = State(
        slices_x=slices_x,
        slices_y=slices_y,
        slice_bytes_numerator=total_coeff_bytes,
        slice_bytes_denominator=num_slices * slice_size_scaler,
    )

    transform_data = TransformData(hq_slices=[])
    for sy, transform_coeffs_row in enumerate(transform_coeffs):
        for sx, transform_coeffs_slice in enumerate(transform_coeffs_row):
            # NB: actually the number of slice_size_scaler-byte units left
            # once the length/qindex fields are accounted for; see the
            # comment on State above.
            total_length = slice_bytes(state, sx, sy)
            target_size = 8 * slice_size_scaler * total_length

            # Quantize each slice to fit
            qindex, (y_transform, c1_transform, c2_transform) = quantize_to_fit(
                target_size,
                transform_coeffs_slice,
                8 * slice_size_scaler,
                minimum_qindex,
            )
            transform_data["hq_slices"].append(
                make_hq_slice(
                    y_transform,
                    c1_transform,
                    c2_transform,
                    total_length,
                    qindex,
                    slice_size_scaler,
                ))

    return slice_size_scaler, transform_data
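
A back-of-envelope sketch of why a slice_size_scaler is needed at all (the library's get_safe_lossy_hq_slice_size_scaler may compute this differently): each HQ slice spends 4 bytes on qindex plus three one-byte length fields, and each length byte counts slice_size_scaler-byte units, so it can only express up to 255 * slice_size_scaler coefficient bytes:

picture_bytes = 100_000
num_slices = 16

total_coeff_bytes = picture_bytes - 4 * num_slices       # 99_936
worst_slice_bytes = -(-total_coeff_bytes // num_slices)  # 6_246 (ceil division)

# Smallest scaler letting a single length byte (max 255) cover the worst case
slice_size_scaler = max(1, -(-worst_slice_bytes // 255))
assert slice_size_scaler == 25
assert 255 * slice_size_scaler >= worst_slice_bytes
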