# Example #1
from clustering import Clustering

my_clustering = Clustering()

# Configuration to run the project: source video, input/output directories,
# frames directory, then numeric tuning parameters and a value range.
# NOTE(review): argument meanings are not visible here — confirm against
# Clustering.start's signature.
my_clustering.start('media/video.mp4', 'input', 'output', 'frames_videos', 30,
                    15, [0, 256])
    def patching_frame(yuv_wt, yuv_org, blocksize):
        """Re-blend a watermarked Y plane with its original, tile by tile.

        For each ``blocksize`` x ``blocksize`` tile of the luma channel the
        watermark delta (watermarked - original) is scaled by a per-tile
        ``factor`` derived from the tile's statistics, then re-applied on
        top of the original pixels.  Smooth tiles (low standard deviation)
        get the watermark scaled down; busy tiles are examined with a
        clustering pass and may have it slightly amplified.

        Args:
            yuv_wt: watermarked frame (provides ``file_size``, ``YUV_width``,
                ``YUV_height``, ``YUV_data``).
            yuv_org: original/reference frame with the same attributes.
            blocksize: side length of the square tiles; the frame data size
                must be divisible by ``blocksize ** 2``.

        Returns:
            The patched Y-channel samples (same container type as a slice of
            ``yuv_org.YUV_data``), or ``None`` when validation fails.
        """
        logger.info("Patching................")

        # --- validation: equal sizes, tileable into blocksize**2 tiles ---
        if yuv_wt.file_size != yuv_org.file_size:
            logger.error("the data size is not the same")
            return

        if yuv_wt.file_size % (blocksize**2) != 0 or yuv_org.file_size % (
                blocksize**2) != 0:
            logger.error(
                "data size: frame1 {0},  frame2 {1} are not divisible by blocksize {2} "
                .format(yuv_wt.file_size, yuv_org.file_size, blocksize))
            return

        w = yuv_org.YUV_width
        h = yuv_org.YUV_height
        # The output starts as a copy of the original Y channel (only the
        # luma plane is processed); patched pixels are written in place.
        # NOTE(review): an empty array.array ('B' or 'H', chosen by
        # bytes_per_sample) used to be created here but was unconditionally
        # overwritten by this slice — that dead code has been removed.
        mse_array = yuv_org.YUV_data[:w * h]
        logger.debug("the original mse_array size is {0}".format(
            len(mse_array)))

        # Integer tile counts.  '//' keeps these ints under Python 3 as
        # well; plain '/' would hand range() a float and crash there.
        w_blocks = w // blocksize
        h_blocks = h // blocksize

        scaledown_blocks = 0
        scaleup_blocks = 0
        # NOTE(review): normal_blocks is reported below but never
        # incremented anywhere, so the "kept" percentage is always 0.
        # Left as-is pending confirmation of the intended third path
        # (a commented-out "keep the watermarked block" branch existed).
        normal_blocks = 0

        # Walk the tile grid left-to-right, top-to-bottom.
        for w_j in range(0, w_blocks):
            for h_i in range(0, h_blocks):
                block_wt = []
                block_org = []
                factor = 1
                num_clusters = 0

                # Gather this tile's rows from both frames.
                for k in range(0, blocksize):
                    startaddr = (h_i * blocksize + k) * w + w_j * blocksize
                    block_wt.extend(yuv_wt.YUV_data[startaddr:(startaddr +
                                                               blocksize)])
                    block_org.extend(yuv_org.YUV_data[startaddr:(startaddr +
                                                                 blocksize)])

                # The watermark signal itself: per-pixel delta.
                block_noisy = [
                    pixel_wt - pixel_org
                    for pixel_wt, pixel_org in zip(block_wt, block_org)
                ]

                logger.debug("org is ")
                for i in range(0, blocksize * blocksize, blocksize):
                    logger.debug(block_org[i:i + blocksize])
                logger.debug("watermarked is")
                for i in range(0, blocksize * blocksize, blocksize):
                    logger.debug(block_wt[i:i + blocksize])

                # ------------------------------------------------------
                # Per-tile statistics driving the scaling decision.
                # ------------------------------------------------------
                block_sum = sum(block_org)
                block_average = block_sum * 1.0 / len(block_org)

                block_SD_wt = VQM.SD(block_wt)
                block_SD_org = VQM.SD(block_org)
                block_SD_noisy = VQM.SD(block_noisy)

                # Maximum tolerated luma change for the tile, chosen by its
                # mean luma.  Constants come from prior experiments —
                # presumably with 10-bit video (TODO confirm); dark (< 60)
                # and bright (>= 600) tiles tolerate a larger relative
                # change than mid-range ones.
                luma_low_boundary = 60
                luma_high_boundary = 600

                if block_average < luma_high_boundary:
                    if block_average < luma_low_boundary:
                        MAX_L_H_AD = block_average * 0.025
                    else:
                        MAX_L_H_AD = block_average * 0.0025
                else:
                    MAX_L_H_AD = block_average * 0.025

                logger.debug(" block {0} {1}".format(w_j, h_i))
                logger.debug(
                    "[WM_SD, org_SD, noisy_SD, MAX_L_H_AD, block_average,] is [{0},{1}, {2}, {3}, {4}]"
                    .format(block_SD_wt, block_SD_org, block_SD_noisy,
                            MAX_L_H_AD, block_average))

                # Clamp the SDs away from zero so 1/SD stays bounded.
                if block_SD_org < 1:
                    block_SD_org = 1
                if block_SD_noisy < 1:
                    block_SD_noisy = 1

                if block_SD_org < 10:
                    # Smooth tile: the watermark would be visible, so
                    # shrink it in proportion to its own strength.
                    scaledown_blocks += 1
                    factor = 1 / block_SD_noisy
                else:
                    # Busy tile.  High SD alone is not enough — the pixels
                    # may be sparsely split between the range ends, which
                    # is a bad place to hide data.  Cluster the tile's
                    # rows: few clusters -> scale down; many clusters
                    # (real texture) -> amplify slightly.
                    block_org_2d = []
                    for i in range(0, len(block_org), 16):
                        block_org_2d.append(block_org[i:i + 16])
                    cluster_func = Clustering(block_org_2d, 4)
                    cluster_func.start()
                    num_clusters = len(cluster_func.clusters)
                    if num_clusters < 28:
                        factor = 1 / block_SD_noisy
                        scaledown_blocks += 1
                    else:
                        factor = 1.05
                        scaleup_blocks += 1

                logger.debug(
                    "normalized MAX_L_H_AD is {0} and factor is {1}, number of clusters is {2}"
                    .format(MAX_L_H_AD, factor, num_clusters))

                # Re-apply the (scaled) watermark delta onto the original
                # pixels and store the result row by row.
                for h_k in range(0, blocksize):
                    startaddr = (h_i * blocksize + h_k) * w + w_j * blocksize
                    for block_k in range(0, blocksize):
                        pixel = block_org[h_k * blocksize + block_k]
                        mse_array[startaddr + block_k] = int(
                            pixel +
                            (block_wt[h_k * blocksize + block_k] - pixel) *
                            factor)
                    logger.debug(mse_array[startaddr:startaddr + blocksize])

        # Integer percentages; '//' preserves the Python 2 behaviour under
        # Python 3 as well.
        logger.info("{0} blocks ({1}%) are scaledowned.".format(
            scaledown_blocks, scaledown_blocks * 100 // (w_blocks * h_blocks)))
        logger.info("{0} blocks ({1}%) are scaleuped.".format(
            scaleup_blocks, scaleup_blocks * 100 // (w_blocks * h_blocks)))
        logger.info("{0} blocks ({1}%) are kept.".format(
            normal_blocks, normal_blocks * 100 // (w_blocks * h_blocks)))
        logger.info("#### end of patching")
        return mse_array