Example #1
def main():


    input_file = '/home/zsa/bird2.bmp'
    output_file = '/home/zsa/ttt'

    image = cv2.imread(input_file)
    image = cv2.resize(image, (448, 448))
    # cv2.imread returns BGR, so convert from BGR (not RGB) to YCrCb
    ycbcr = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)

    npmat = np.array(ycbcr, dtype=np.uint8)

    rows, cols = npmat.shape[0], npmat.shape[1]

    # block size: 8x8
    if rows % 8 == cols % 8 == 0:
        blocks_count = rows // 8 * cols // 8
    else:
        raise ValueError(("the width and height of the image "
                          "should both be mutiples of 8"))

    # dc is the top-left cell of the block, ac are all the other cells
    dc = np.empty((blocks_count, 3), dtype=np.int32)
    ac = np.empty((blocks_count, 63, 3), dtype=np.int32)

    for i in range(0, rows, 8):
        for j in range(0, cols, 8):
            try:
                block_index += 1
            except NameError:
                block_index = 0

            for k in range(3):
                # split 8x8 block and center the data range on zero
                # [0, 255] --> [-128, 127]
                block = npmat[i:i+8, j:j+8, k] - 128

                dct_matrix = dct_2d(block)
                quant_matrix = quantize(dct_matrix,
                                        'lum' if k == 0 else 'chrom')
                zz = block_to_zigzag(quant_matrix)

                dc[block_index, k] = zz[0]
                ac[block_index, :, k] = zz[1:]

    H_DC_Y = HuffmanTree(np.vectorize(bits_required)(dc[:, 0]))
    H_DC_C = HuffmanTree(np.vectorize(bits_required)(dc[:, 1:].flat))
    H_AC_Y = HuffmanTree(
            flatten(run_length_encode(ac[i, :, 0])[0]
                    for i in range(blocks_count)))
    H_AC_C = HuffmanTree(
            flatten(run_length_encode(ac[i, :, j])[0]
                    for i in range(blocks_count) for j in [1, 2]))

    tables = {'dc_y': H_DC_Y.value_to_bitstring_table(),
              'ac_y': H_AC_Y.value_to_bitstring_table(),
              'dc_c': H_DC_C.value_to_bitstring_table(),
              'ac_c': H_AC_C.value_to_bitstring_table()}

    write_to_file(output_file, dc, ac, blocks_count, tables)
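
The examples here call a dct_2d helper that is never shown. A minimal sketch, assuming it wraps scipy.fftpack and applies an orthonormal DCT-II along both axes of the 8x8 block (with the matching inverse used by the decoding examples):

import numpy as np
from scipy import fftpack

def dct_2d(block):
    # 2-D DCT-II built from two 1-D orthonormal DCTs (one per axis)
    return fftpack.dct(fftpack.dct(block.T, norm='ortho').T, norm='ortho')

def idct_2d(dct_block):
    # inverse 2-D DCT, the counterpart used when reconstructing blocks
    return fftpack.idct(fftpack.idct(dct_block.T, norm='ortho').T, norm='ortho')
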
Example #2
def main(input_file, output_file):

    image = Image.open(input_file)

    ycbcr = image.convert('YCbCr')

    npmat = np.array(ycbcr, dtype=np.uint8)

    rows, cols = npmat.shape[0], npmat.shape[1]

    # block size: 8x8
    if rows % 8 == cols % 8 == 0:
        blocks_count = rows // 8 * cols // 8
    else:
        raise ValueError(("the width and height of the image "
                          "should both be mutiples of 8"))

    # dc is the top-left cell of the block, ac are all the other cells
    dc = np.empty((blocks_count, 3), dtype=np.int32)
    ac = np.empty((blocks_count, 63, 3), dtype=np.int32)

    for i in range(0, rows, 8):
        for j in range(0, cols, 8):
            try:
                block_index += 1
            except NameError:
                block_index = 0

            for k in range(3):
                # split 8x8 block and center the data range on zero
                block = npmat[i:i + 8, j:j + 8, k] - 128
                # discrete cosine block transform
                dct_matrix = dct_2d(block)
                # block quantization
                quant_matrix = quantize(dct_matrix,
                                        'lum' if k == 0 else 'chrom')
                # get an array of coefficients
                zz = zigzag_traversal(block_to_zigzag(quant_matrix))

                dc[block_index, k] = zz[0]
                ac[block_index, :, k] = zz[1:]
    # create Huffman trees separately for 'dc_y', 'ac_y', 'dc_c', 'ac_c'
    H_DC_Y = HuffmanTree(np.vectorize(bits_required)(dc[:, 0]))
    H_DC_C = HuffmanTree(np.vectorize(bits_required)(dc[:, 1:].flat))
    H_AC_Y = HuffmanTree(
        flatten(
            run_length_encode(ac[i, :, 0])[0] for i in range(blocks_count)))
    H_AC_C = HuffmanTree(
        flatten(
            run_length_encode(ac[i, :, j])[0] for i in range(blocks_count)
            for j in [1, 2]))
    # final tables for blocks
    tables = {
        'dc_y': H_DC_Y.value_to_bitstring_table(),
        'ac_y': H_AC_Y.value_to_bitstring_table(),
        'dc_c': H_DC_C.value_to_bitstring_table(),
        'ac_c': H_AC_C.value_to_bitstring_table()
    }

    write_to_file(output_file, dc, ac, blocks_count, tables)
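
block_to_zigzag is another helper the snippets assume. A possible sketch that walks the 8x8 block along its anti-diagonals in the usual JPEG zigzag order (the exact implementation in the source projects may differ):

import numpy as np

def zigzag_points(rows=8, cols=8):
    # enumerate block coordinates along anti-diagonals, alternating direction
    points = []
    for s in range(rows + cols - 1):
        diagonal = [(i, s - i) for i in range(rows) if 0 <= s - i < cols]
        points.extend(diagonal if s % 2 else reversed(diagonal))
    return points

def block_to_zigzag(block):
    # flatten an 8x8 block into a length-64 vector in zigzag order
    return np.array([block[p] for p in zigzag_points(*block.shape)])
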
Example #3
def JPEG_encoder(input_image):

    imsize = input_image.shape
    #dct = np.zeros(imsize)

    image = cv2.cvtColor(input_image, cv2.COLOR_RGB2YCrCb)

    rows, cols = imsize[0], imsize[1]
    blocks_count = rows // 8 * cols // 8

    # dc is the top-left cell of the block, ac are all the other cells
    dc = np.empty((blocks_count, 3), dtype=np.int32)
    ac = np.empty((blocks_count, 63, 3), dtype=np.int32)

    for i in range(0, rows, 8):
        for j in range(0, cols, 8):
            try:
                block_index += 1
            except NameError:
                block_index = 0

            for k in range(3):
                # split 8x8 block and center the data range on zero
                # [0, 255] --> [-128, 127]
                block = image[i:i + 8, j:j + 8, k] - 128
                # Block based DCT
                dct_matrix = dct2(block)
                # Quantization
                # zonal coding
                quant_matrix = quantize(dct_matrix,
                                        'lum' if k == 0 else 'chrom')
                zz = block_to_zigzag(quant_matrix)

                dc[block_index, k] = zz[0]
                ac[block_index, :, k] = zz[1:]

    # Huffman
    H_DC_Y = HuffmanTree(np.vectorize(bits_required)(dc[:, 0]))
    H_DC_C = HuffmanTree(np.vectorize(bits_required)(dc[:, 1:].flat))
    H_AC_Y = HuffmanTree(
        flatten(
            run_length_encode(ac[i, :, 0])[0] for i in range(blocks_count)))
    H_AC_C = HuffmanTree(
        flatten(
            run_length_encode(ac[i, :, j])[0] for i in range(blocks_count)
            for j in [1, 2]))

    tables = {
        'dc_y': H_DC_Y.value_to_bitstring_table(),
        'ac_y': H_AC_Y.value_to_bitstring_table(),
        'dc_c': H_DC_C.value_to_bitstring_table(),
        'ac_c': H_AC_C.value_to_bitstring_table()
    }

    return dc, ac, blocks_count, tables
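
quantize(matrix, 'lum' / 'chrom') is also left undefined in these snippets. A plausible sketch, assuming the standard JPEG (ITU-T T.81 Annex K) quantization tables and element-wise division with rounding:

import numpy as np

# standard luminance and chrominance quantization tables (quality ~50)
QUANT_LUM = np.array([
    [16, 11, 10, 16,  24,  40,  51,  61],
    [12, 12, 14, 19,  26,  58,  60,  55],
    [14, 13, 16, 24,  40,  57,  69,  56],
    [14, 17, 22, 29,  51,  87,  80,  62],
    [18, 22, 37, 56,  68, 109, 103,  77],
    [24, 35, 55, 64,  81, 104, 113,  92],
    [49, 64, 78, 87, 103, 121, 120, 101],
    [72, 92, 95, 98, 112, 100, 103,  99]])

QUANT_CHROM = np.array([
    [17, 18, 24, 47, 99, 99, 99, 99],
    [18, 21, 26, 66, 99, 99, 99, 99],
    [24, 26, 56, 99, 99, 99, 99, 99],
    [47, 66, 99, 99, 99, 99, 99, 99],
    [99, 99, 99, 99, 99, 99, 99, 99],
    [99, 99, 99, 99, 99, 99, 99, 99],
    [99, 99, 99, 99, 99, 99, 99, 99],
    [99, 99, 99, 99, 99, 99, 99, 99]])

def quantize(block, component):
    # divide the DCT coefficients by the table and round to integers
    q = QUANT_LUM if component == 'lum' else QUANT_CHROM
    return (block / q).round().astype(np.int32)
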
Example #4
def main():
    start = datetime.datetime.now()
    parser = argparse.ArgumentParser()
    parser.add_argument("input", help="path to the input image")
    # parser.add_argument("output", help="path to the output image")
    args = parser.parse_args()

    input_file = args.input
    # output_file = args.output
    tole = len(input_file)
    poi = 0
    for i in input_file:
        if i != ".":
            poi += 1
        else:
            break
    exte = input_file[poi + 1:]
    print("exte : ", exte)
    image = Image.open(input_file)
    input_file = input_file[:poi]
    or_img = img2arr(image)
    print("original image shape : ", or_img.shape)
    ycbcr = image.convert('YCbCr')
    npmat = np.array(ycbcr, dtype=np.uint8)
    rows, cols = npmat.shape[0], npmat.shape[1]
    orows, ocols = rows, cols
    print("old shape : ", orows, " * ", ocols)
    rows = int(rows / 8) * 8
    cols = int(cols / 8) * 8
    # npmat.reshape((rows, cols, 3)) WRONG
    npmat = npmat[0:rows, 0:cols, :]
    print("new shape : ", npmat.shape[0], " * ", npmat.shape[1])

    # block size: 8x8
    """
    if rows % 8 == cols % 8 == 0:
        blocks_count = rows // 8 * cols // 8
    else:
    	if rows % 8 != 0 and cols % 8 != 0:
    		blocks_count = int(rows / 8) * int(cols / 8)
    """
    print(rows / 8, cols / 8, int(rows / 8), int(cols / 8))
    blocks_count = int(rows / 8) * int(cols / 8)

    # raise ValueError(("the width and height of the image should both be mutiples of 8"))
    print("blocks_count : ", blocks_count)
    # dc is the top-left cell of the block, ac are all the other cells
    dc = np.empty((blocks_count, 3), dtype=np.int32)
    ac = np.empty((blocks_count, 63, 3), dtype=np.int32)
    print("rows", rows, " cols ", cols)
    for i in range(0, rows, 8):
        for j in range(0, cols, 8):
            try:
                block_index += 1
            except NameError:
                block_index = 0

            for k in range(3):
                # split 8x8 block and center the data range on zero
                # [0, 255] --> [-128, 127]
                block = npmat[i:i + 8, j:j + 8, k] - 128

                dct_matrix = fftpack.dct(block, norm='ortho')
                quant_matrix = quantize(dct_matrix,
                                        'lum' if k == 0 else 'chrom')
                # print("P")
                zz = block_to_zigzag(quant_matrix)
                # print("Q")

                dc[block_index, k] = zz[0]
                ac[block_index, :, k] = zz[1:]
        # print("ENCODING_Outer")
    H_DC_Y = HuffmanTree(np.vectorize(bits_required)(dc[:, 0]))
    H_DC_C = HuffmanTree(np.vectorize(bits_required)(dc[:, 1:].flat))
    H_AC_Y = HuffmanTree(
        flatten(
            run_length_encode(ac[i, :, 0])[0] for i in range(blocks_count)))
    H_AC_C = HuffmanTree(
        flatten(
            run_length_encode(ac[i, :, j])[0] for i in range(blocks_count)
            for j in [1, 2]))

    tables = {
        'dc_y': H_DC_Y.value_to_bitstring_table(),
        'ac_y': H_AC_Y.value_to_bitstring_table(),
        'dc_c': H_DC_C.value_to_bitstring_table(),
        'ac_c': H_AC_C.value_to_bitstring_table()
    }

    # print("B")
    print("ENCODING DONE................................................")
    print("time passed : ", ((datetime.datetime.now() - start).seconds) / 60,
          " minutes")
    # write_to_file(output_file, dc, ac, blocks_count, tables)
    # print("C")
    # assuming that the block is a 8x8 square
    block_side = 8

    # assuming that the image height and width are equal
    # image_side = int(math.sqrt(blocks_count)) * block_side
    # rows = 672
    # cols = 1200

    # blocks_per_line = image_side // block_side

    npmat = np.empty(or_img.shape, dtype=np.uint8)
    """
    for block_index in range(blocks_count):
        i = block_index // blocks_per_line * block_side
        j = block_index % blocks_per_line * block_side

        for c in range(3):
            zigzag = [dc[block_index, c]] + list(ac[block_index, :, c])
            quant_matrix = zigzag_to_block(zigzag)
            dct_matrix = dequantize(quant_matrix, 'lum' if c == 0 else 'chrom')
            block = fftpack.idct(dct_matrix, norm='ortho')
            npmat[i:i+8, j:j+8, c] = block + 128
    """
    # block_index = 0
    i, j = 0, 0
    print("rows : ", rows, " cols : ", cols)
    for i in range(0, rows, 8):
        # print("DECODING_Outer")
        for j in range(0, cols, 8):
            try:
                block_index1 += 1
            except NameError:
                block_index1 = 0

            for c in range(3):
                zigzag = [dc[block_index1, c]] + list(ac[block_index1, :, c])
                quant_matrix = zigzag_to_block(zigzag)
                dct_matrix = dequantize(quant_matrix,
                                        'lum' if c == 0 else 'chrom')
                block = fftpack.idct(dct_matrix, norm='ortho')
                npmat[i:i + 8, j:j + 8, c] = block + 128

    # copy the cropped-off border back from the original before building the
    # output image (modifying npmat after the conversion would have no effect)
    if orows != rows or ocols != cols:
        npmat[-(orows - rows):, -(ocols - cols):, :] = or_img[-(orows - rows):,
                                                              -(ocols - cols):, :]
    image = Image.fromarray(npmat, 'YCbCr')
    image = image.convert('RGB')
    # image.show()
    print("DONE. time passed : ",
          ((datetime.datetime.now() - start).seconds) / 60, " minutes")
    output_file = input_file + "_opti_by_pkikani." + exte
    image.save(output_file)
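
Example #4 also decodes the blocks back with zigzag_to_block and dequantize, which are not shown either. A minimal sketch of the inverse helpers, reusing the hypothetical zigzag_points and quantization tables sketched above:

import numpy as np

def zigzag_to_block(zigzag):
    # rebuild a square block from a zigzag-ordered vector (64 -> 8x8)
    rows = cols = int(len(zigzag) ** 0.5)
    block = np.empty((rows, cols), dtype=np.int32)
    for value, point in zip(zigzag, zigzag_points(rows, cols)):
        block[point] = value
    return block

def dequantize(block, component):
    # multiply back by the same table used during quantization
    q = QUANT_LUM if component == 'lum' else QUANT_CHROM
    return block * q
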
Example #5
    # =========================
    # Huffman coding
    # =========================
    H_DC_Y = HuffmanTree(np.vectorize(bits_required)(dc[:, 0]))
    H_DC_C = HuffmanTree(np.vectorize(bits_required)(dc[:, 1:].flat))
    H_AC_Y = HuffmanTree(
        flatten(
            run_length_encode(ac[i, :, 0])[0] for i in range(blocks_count)))
    H_AC_C = HuffmanTree(
        flatten(
            run_length_encode(ac[i, :, j])[0] for i in range(blocks_count)
            for j in [1, 2]))

    tables = {
        'dc_y': H_DC_Y.value_to_bitstring_table(),
        'ac_y': H_AC_Y.value_to_bitstring_table(),
        'dc_c': H_DC_C.value_to_bitstring_table(),
        'ac_c': H_AC_C.value_to_bitstring_table()
    }

    print('dc_y')
    print(tables['dc_y'])
    print('ac_y')
    print(tables['ac_y'])
    print('dc_c')
    print(tables['dc_c'])
    print('ac_c')
    print(tables['ac_c'])

    # =========================
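
The Huffman trees in every example are fed by two more assumed helpers, bits_required and run_length_encode. A sketch of what they plausibly do: DC differences are categorized by their bit length, and AC coefficients become (zero-run, size) symbols plus the nonzero amplitudes, with (0, 0) marking end-of-block:

def bits_required(n):
    # number of bits needed to represent the magnitude of n (JPEG "category")
    n = abs(n)
    result = 0
    while n > 0:
        n >>= 1
        result += 1
    return result

def run_length_encode(arr):
    # symbols: (run of zeros, size of value) pairs; values: the amplitudes
    symbols, values = [], []
    last_nonzero = -1
    for i, elem in enumerate(arr):
        if elem != 0:
            last_nonzero = i
    run_length = 0
    for i, elem in enumerate(arr):
        if i > last_nonzero:
            symbols.append((0, 0))   # end-of-block marker
            values.append(0)
            break
        elif elem == 0 and run_length < 15:
            run_length += 1
        else:
            symbols.append((run_length, bits_required(elem)))
            values.append(elem)
            run_length = 0
    return symbols, values
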
Example #6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("input", help="path to the input image")
    parser.add_argument("output", help="path to the output image")
    args = parser.parse_args()

    input_file = args.input
    output_file = args.output

    image = Image.open(input_file)
    ycbcr = image.convert('YCbCr')

    npmat = np.array(ycbcr, dtype=np.uint8)

    rows, cols = npmat.shape[0], npmat.shape[1]

    # block size: 8x8
    if rows % 8 == cols % 8 == 0:
        blocks_count = rows // 8 * cols // 8
    else:
        raise ValueError(("the width and height of the image "
                          "should both be mutiples of 8"))

    # dc is the top-left cell of the block, ac are all the other cells
    dc = np.empty((blocks_count, 3), dtype=np.int32)
    ac = np.empty((blocks_count, 63, 3), dtype=np.int32)

    for i in range(0, rows, 8):
        for j in range(0, cols, 8):
            try:
                block_index += 1
            except NameError:
                block_index = 0

            for k in range(3):
                # split 8x8 block and center the data range on zero
                # [0, 255] --> [-128, 127]
                block = npmat[i:i + 8, j:j + 8, k] - 128

                dct_matrix = fftpack.dct(block, norm='ortho')
                quant_matrix = quantize(dct_matrix,
                                        'lum' if k == 0 else 'chrom')
                zz = block_to_zigzag(quant_matrix)

                dc[block_index, k] = zz[0]
                ac[block_index, :, k] = zz[1:]

    H_DC_Y = HuffmanTree(np.vectorize(bits_required)(dc[:, 0]))
    H_DC_C = HuffmanTree(np.vectorize(bits_required)(dc[:, 1:].flat))
    H_AC_Y = HuffmanTree(
        flatten(
            run_length_encode(ac[i, :, 0])[0] for i in range(blocks_count)))
    H_AC_C = HuffmanTree(
        flatten(
            run_length_encode(ac[i, :, j])[0] for i in range(blocks_count)
            for j in [1, 2]))

    tables = {
        'dc_y': H_DC_Y.value_to_bitstring_table(),
        'ac_y': H_AC_Y.value_to_bitstring_table(),
        'dc_c': H_DC_C.value_to_bitstring_table(),
        'ac_c': H_AC_C.value_to_bitstring_table()
    }

    write_to_file(output_file, dc, ac, blocks_count, tables)
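
flatten, used when building the AC Huffman trees, is only defined inline near the end of the final example below; a one-line equivalent for the earlier snippets would be:

def flatten(lst):
    # concatenate an iterable of lists into a single flat list
    return [item for sublist in lst for item in sublist]
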
Example #7
def main():
    print("请在命令行运行此程序")
    #imageName = input("Choose Image you want encode : ")
    imageName = raw_input("Choose Image you want encode : ")
    im = Image.open(imageName)
    image = im.resize((800, 800))  # resize to a fixed size so the image splits evenly into 8x8 blocks
    ycbcr = image.convert('YCbCr')  # step 1: color space conversion
    # turn the image into matrices
    image_arr = np.array(image)
    npmat = np.array(ycbcr, dtype=np.uint8)  # luma (Y) and chroma (Cb, Cr) of the image
    width, height = image_arr.shape[0], image_arr.shape[1]
    rows, cols = npmat.shape[0], npmat.shape[1]
    blocks_count = rows / 8 * cols / 8  # integer division under Python 2 (this script uses raw_input)
    """
    # 颜色转化,将RGB转换为YCbCr
    Y = numpy.ndarray(shape=(width, height), dtype=np.int16)
    Cb = numpy.ndarray(shape=(width, height), dtype=np.int16)
    Cr = numpy.ndarray(shape=(width, height), dtype=np.int16)
    for i in range(width):
        for j in range(height):
            Image_Matrix = image_arr[i][j]
            Y[i][j] = 0.299 * Image_Matrix[0] + 0.587 * Image_Matrix[1] + 0.114 * Image_Matrix[2]
            Cb[i][j] = -0.1687 * Image_Matrix[0] - 0.3313 * Image_Matrix[1] + 0.5 * Image_Matrix[2] + 128
            Cr[i][j] = 0.5 * Image_Matrix[0] - 0.418 * Image_Matrix[1] - 0.0813 * Image_Matrix[2] + 128

    # 色彩二度采样 将YCbCr转化为Y:U:V 4:2 :0 形式,y不变,U,V分别变为原来的1/4
    half_u = numpy.ndarray(shape=(width, height / 2), dtype=np.int16)
    half_v = numpy.ndarray(shape=(width, height / 2), dtype=np.int16)
    qut_u = numpy.ndarray(shape=(width / 2, height / 2), dtype=np.int16)
    qut_V = numpy.ndarray(shape=(width / 2, height / 2), dtype=np.int16)
    for m in range(height / 2):
        half_u[:, m] = Cb[:, m * 2]  # 从第0行开始,隔一行出现Cb
        half_v[:, m] = Cr[:, m * 2 + 1]  # 从第1行开始,隔一行出现Cr

    for n in range(width / 2):
        qut_u[n, :] = half_u[n * 2, :]  # 隔一列出现CbCr       实现 4:2:0效果
        qut_V[n, :] = half_v[n * 2, :]

    # 采样后YUV图像
    YUV = numpy.ndarray(shape=(width, height, 3), dtype=np.int16)
    # 组成YUV
    for i in range(rows):
        for j in range(cols):
            YUV[i][j][0] = Y[i][j]  # Y不变
            if (j % 2 == 0) & (i % 2 == 0):
                YUV[i][j][1] = qut_u[i / 2, j / 2]  # U每隔四个点出现一次
            if (j % 2 == 0) & (i % 2 != 0):
                YUV[i][j][2] = qut_V[i / 2, j / 2]  # U每隔四个点出现一次
    """
    dc = np.empty((blocks_count, 3), dtype=np.int32)
    ac = np.empty((blocks_count, 63, 3), dtype=np.int32)

    for i in range(0, rows, 8):
        for j in range(0, cols, 8):
            try:
                block_index += 1
            except NameError:
                block_index = 0

            for k in range(3):
                block = npmat[i:i + 8, j:j + 8, k] - 128

                dct_matrix = dct_2d(block)
                quant_matrix = quantize(dct_matrix,
                                        'lum' if k == 0 else 'chrom')
                zz = block_to_zigzag(quant_matrix)

                dc[block_index, k] = zz[0]
                ac[block_index, :, k] = zz[1:]

    H_DC_Y = HuffmanTree(np.vectorize(bits_required)(dc[:, 0]))
    H_DC_C = HuffmanTree(np.vectorize(bits_required)(dc[:, 1:].flat))
    H_AC_Y = HuffmanTree(
        flatten(
            run_length_encode(ac[i, :, 0])[0] for i in range(blocks_count)))
    H_AC_C = HuffmanTree(
        flatten(
            run_length_encode(ac[i, :, j])[0] for i in range(blocks_count)
            for j in [1, 2]))

    tables = {
        'dc_y': H_DC_Y.value_to_bitstring_table(),
        'ac_y': H_AC_Y.value_to_bitstring_table(),
        'dc_c': H_DC_C.value_to_bitstring_table(),
        'ac_c': H_AC_C.value_to_bitstring_table()
    }

    Basename = os.path.basename(imageName)
    base = os.path.splitext(Basename)[0]
    write_to_file(base + ".txt", dc, ac, blocks_count, tables)
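
HuffmanTree itself is never shown either. The snippets only rely on a constructor taking an iterable of symbols and a value_to_bitstring_table() method returning a symbol-to-bitstring dict; a generic Huffman sketch with that interface (not the length-constrained code construction from the JPEG spec) could look like this:

import heapq
from collections import Counter

class HuffmanTree:
    # minimal sketch: builds prefix codes from symbol frequencies and exposes
    # the value_to_bitstring_table() interface the examples rely on
    def __init__(self, values):
        freqs = Counter(values)
        heap = [(f, i, (sym, None, None)) for i, (sym, f) in enumerate(freqs.items())]
        heapq.heapify(heap)
        counter = len(heap)
        while len(heap) > 1:
            f1, _, left = heapq.heappop(heap)
            f2, _, right = heapq.heappop(heap)
            heapq.heappush(heap, (f1 + f2, counter, (None, left, right)))
            counter += 1
        self._root = heap[0][2] if heap else None

    def value_to_bitstring_table(self):
        table = {}

        def walk(node, code):
            sym, left, right = node
            if left is None and right is None:
                table[sym] = code or '0'   # single-symbol tree still gets a bit
            else:
                walk(left, code + '0')
                walk(right, code + '1')

        if self._root is not None:
            walk(self._root, '')
        return table
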
Example #8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("input", help="path to the input image")
    parser.add_argument("output", help="path to the output image")
    args = parser.parse_args()

    input_file = args.input
    output_file = args.output

    image = Image.open(input_file)
    ycbcr = image.convert(mode='YCbCr')  # mode conversion to JPEG's 3x8-bit pixel format

    # The step above is the first stage, the color space transformation: RGB is converted
    # to Y'CbCr (or, informally, YCbCr), where Y' stands for luminance and C for chrominance.
    # The Y' component represents the brightness of a pixel, and the Cb and Cr components
    # represent the chrominance (split into blue and red components).
    # ycbcr.show()

    npmat = np.array(ycbcr, dtype=np.uint8)  # build the image matrix

    rows, cols = npmat.shape[0], npmat.shape[1]  # number of rows and columns

    print('rows: {}  cols: {}'.format(rows, cols))


    # Split into 8x8 blocks. For simplicity the image dimensions must divide
    # evenly by 8 (see Wikipedia for details).
    if rows % 8 == cols % 8 == 0:
        blocks_count = rows // 8 * cols // 8
    else:
        raise ValueError(("the width and height of the image "
                          "should both be mutiples of 8"))

    '''
    Additional note:
    Humans are far more sensitive to luminance (the Y' above) than to hue and
    color saturation (Cb and Cr), so the next step reduces the share of the
    latter to achieve compression.
    '''

    # dc is the top-left cell of the block, ac are all the other cells
    # dc is the DC coefficient and sets the basic tone; ac are the AC coefficients.
    # This is the advantage of the DCT: it concentrates most of the signal energy in one corner.
    dc = np.empty((blocks_count, 3), dtype=np.int32)
    ac = np.empty((blocks_count, 63, 3), dtype=np.int32)

    for i in range(0, rows, 8):
        for j in range(0, cols, 8):
            try:
                block_index += 1
            except NameError:
                block_index = 0

            for k in range(3):
                # process each 8x8 block in each of its three channels,
                # shifting the range [0, 255] --> [-128, 127]
                block = npmat[i:i+8, j:j+8, k] - 128

                dct_matrix = fftpack.dct(block, norm='ortho')  # apply the DCT to the block
                # quantization: divide by a constant in the frequency domain and round
                quant_matrix = quantize(dct_matrix, 'lum' if k == 0 else 'chrom')

                zz = block_to_zigzag(quant_matrix)  # zigzag encoding

                dc[block_index, k] = zz[0]
                ac[block_index, :, k] = zz[1:]


    H_DC_Y = HuffmanTree(np.vectorize(bits_required)(dc[:, 0]))
    H_DC_C = HuffmanTree(np.vectorize(bits_required)(dc[:, 1:].flat))
    H_AC_Y = HuffmanTree(
            flatten(run_length_encode(ac[i, :, 0])[0]
                    for i in range(blocks_count)))
    H_AC_C = HuffmanTree(
            flatten(run_length_encode(ac[i, :, j])[0]
                    for i in range(blocks_count) for j in [1, 2]))

    tables = {'dc_y': H_DC_Y.value_to_bitstring_table(),
              'ac_y': H_AC_Y.value_to_bitstring_table(),
              'dc_c': H_DC_C.value_to_bitstring_table(),
              'ac_c': H_AC_C.value_to_bitstring_table()}

    write_to_file(output_file, dc, ac, blocks_count, tables)
def main(inpath='./lenna.png', outpath='./lenna.dat', tempoutpath='./tmp.png'):
    img_bgr = cv2.imread(inpath)
    img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2YCrCb)

    n_rows, n_cols = img.shape[0], img.shape[1]

    if n_rows % 8 == n_cols % 8 == 0:
        n_blocks = n_rows // 8 * n_cols // 8
    else:
        raise ValueError(("the width and height of the image "
                          "should both be mutiples of 8"))

    dec_img = np.zeros((n_rows, n_cols, 3), dtype=np.uint8)
    dc = np.zeros((n_blocks, 3), dtype=np.int32)
    ac = np.zeros((n_blocks, 63, 3), dtype=np.int32)

    block_index = 0
    for i in range(0, n_rows, 8):
        for j in range(0, n_cols, 8):
            for k in range(3):
                block = img[i:i + 8, j:j + 8, k] - 128
                dct_block = dct_2d(block)
                quant_block = quantize(dct_block, 'lum' if k == 0 else 'chrom')
                zz = block2zigzag(quant_block)
                dc[block_index, k] = zz[0]
                ac[block_index, :, k] = zz[1:]

                dequant_block = dequantize(quant_block,
                                           'lum' if k == 0 else 'chrom')
                idct_block = idct_2d(dequant_block)
                dec_img[i:i + 8, j:j + 8,
                        k] = (idct_block + 128).round().astype(np.uint8)
            block_index += 1

    dec_img_bgr = cv2.cvtColor(dec_img, cv2.COLOR_YCrCb2BGR)
    cv2.imwrite(tempoutpath, dec_img_bgr)

    for i in range(n_blocks - 1, 0, -1):
        dc[i] -= dc[i - 1]

    # dc: length of VLI, VLI
    # ac: (run length of zero, length of VLI), VLI
    # the former are stored as Huffman codes

    def flatten(lst):
        return [x for sublist in lst for x in sublist]

    H_DC_Y = HuffmanTree(np.vectorize(bits_required)(dc[:, 0]))
    H_DC_C = HuffmanTree(np.vectorize(bits_required)(dc[:, 1:].flat))
    H_AC_Y = HuffmanTree(
        flatten([RLE(ac[i, :, 0])[0] for i in range(n_blocks)]))
    H_AC_C = HuffmanTree(
        flatten([
            RLE(ac[i, :, j])[0] for i in range(n_blocks) for j in range(1, 3)
        ]))

    tables = {
        'dc_y': H_DC_Y.value_to_bitstring_table(),
        'ac_y': H_AC_Y.value_to_bitstring_table(),
        'dc_c': H_DC_C.value_to_bitstring_table(),
        'ac_c': H_AC_C.value_to_bitstring_table()
    }

    res_size = JpegIO.writefile(dc, ac, n_blocks, tables, n_rows, n_cols,
                                outpath) / 8 / 8 / 1024

    psnr = skimage.measure.compare_psnr(img_bgr, dec_img_bgr, data_range=255)
    ssim = skimage.measure.compare_ssim(img_bgr,
                                        dec_img_bgr,
                                        data_range=255,
                                        multichannel=True)

    orig_size = n_rows * n_cols * 3 / 1024

    print('The size of the original image is %.2lf KB' % (orig_size))
    print('The size of the result image is %.2lf KB' % (res_size))
    print("PSNR : %lfdB" % (psnr))
    print("MS-SSIM : %lf" % (ssim))
    print("Rate : %lfbpp" % (res_size / orig_size * 8))