Example #1
def blockhash(aInputImage, lHashSize=256, bFlipHandling=False, bDebug=False):
    dImgDimension = math.sqrt(lHashSize)
    if not float(dImgDimension).is_integer():
        raise ValueError(
            "The square root of the hash size has to be an integer")
    lImgDimension = int(dImgDimension)
    if __is_odd(lImgDimension):
        raise ValueError("The square root of the hash size has to be even")

    # fliphandling
    if bFlipHandling:
        aInputImage = fliphandling.handle_flip(aInputImage)

    if bDebug:
        __show_image(aInputImage)
    aGrayScaleImage = __convert_image_to_grayscale(aInputImage)
    aWorkingImage = __resize_image(aGrayScaleImage, lImgDimension,
                                   lImgDimension)

    lHeight, lWidth = aWorkingImage.shape
    # the median over the whole image can be used directly here because each
    # block is a single pixel
    dMedianValue = np.median(aWorkingImage)

    aBlockHash = np.zeros((lImgDimension, lImgDimension), dtype=bool)

    for x in range(lHeight):
        for y in range(lWidth):
            # if block is bigger than one pixel, use mean of block
            if aWorkingImage[x, y] >= dMedianValue:
                aBlockHash[x, y] = True

    return aBlockHash.flatten()
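
The returned hash is a flat boolean vector, so two hashes of equal length can be compared with a normalized Hamming distance. A minimal usage sketch, assuming these functions are importable, that OpenCV is available for loading, and with placeholder file names:

import cv2
import numpy as np

# load two images as numpy arrays (placeholder paths)
aImageA = cv2.imread("image_a.png")
aImageB = cv2.imread("image_b.png")

aHashA = blockhash(aImageA, lHashSize=256)
aHashB = blockhash(aImageB, lHashSize=256)

# normalized Hamming distance: 0.0 means identical hashes,
# values around 0.5 indicate unrelated images
dDistance = np.count_nonzero(aHashA != aHashB) / aHashA.size
print(dDistance)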
Example #2
def average_hash(image, hash_size=8, bFlipHandling=False):
    """
    Average Hash computation

    Implementation follows http://www.hackerfactor.com/blog/index.php?/archives/432-Looks-Like-It.html

    Step by step explanation: https://www.safaribooksonline.com/blog/2013/11/26/image-hashing-with-python/

    @image must be a PIL instance.
    """
    if hash_size < 1:
        raise ValueError("Hash size must be positive")

    # fliphandling
    if bFlipHandling:
        image = fliphandling.handle_flip(image)

    # convert to grayscale, then reduce size and complexity
    # image = image.convert("L").resize((hash_size, hash_size),
    # Image.ANTIALIAS)
    image = __convert_image_to_grayscale(image)
    pixels = __resize_image_downscale(image, hash_size, hash_size)

    # find average pixel value; 'pixels' is an array of the pixel values,
    # ranging from 0 (black) to 255 (white)
    # pixels = numpy.array(image.getdata()).reshape((hash_size, hash_size))
    avg = pixels.mean()

    # create the array of bits
    diff = pixels > avg
    # make a hash
    return diff.flatten()
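
average_hash relies on the private helpers __convert_image_to_grayscale and __resize_image_downscale, which are not part of this excerpt. Since the docstring states that @image is a PIL instance, a minimal sketch of what such helpers might look like (an assumption for illustration, not the original implementation):

import numpy as np
from PIL import Image

def __convert_image_to_grayscale(image):
    # assumed behaviour: reduce the PIL image to a single luminance channel
    return image.convert("L")

def __resize_image_downscale(image, width, height):
    # assumed behaviour: downscale with antialiasing and return the pixel
    # values as a (height, width) numpy array, 0 (black) to 255 (white)
    resized = image.resize((width, height), Image.LANCZOS)
    return np.asarray(resized, dtype=np.float64)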
Example #3
def dhash_vertical(image, hash_size=8, bFlipHandling=False):
    """
    Difference Hash computation.

    following http://www.hackerfactor.com/blog/index.php?/archives/529-Kind-of-Like-That.html

    computes differences vertically

    @image must be a PIL instance.
    """
    # fliphandling
    if bFlipHandling:
        image = fliphandling.handle_flip(image)

    # resize(w, h), but numpy.array((h, w))
    image = __convert_image_to_grayscale(image)
    pixels = __resize_image_downscale(image, hash_size,
                                      hash_size + 1)  # TODO: right order?
    # compute differences between rows
    diff = pixels[1:, :] > pixels[:-1, :]
    return diff.flatten()
Example #4
def phash_simple(image, hash_size=8, highfreq_factor=4, bFlipHandling=False):
    """
    Perceptual Hash computation.

    Implementation follows http://www.hackerfactor.com/blog/index.php?/archives/432-Looks-Like-It.html

    @image must be a PIL instance.
    """

    # fliphandling
    if bFlipHandling:
        image = fliphandling.handle_flip(image)

    import scipy.fftpack
    img_size = hash_size * highfreq_factor
    image = __convert_image_to_grayscale(image)
    pixels = __resize_image_downscale(image, img_size, img_size)
    dct = scipy.fftpack.dct(pixels)
    dctlowfreq = dct[:hash_size, 1:hash_size + 1]
    avg = dctlowfreq.mean()
    diff = dctlowfreq > avg
    return diff.flatten()
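
All of these functions return the hash as a flat boolean array rather than a string. For compact storage or logging, the bits can be packed into a hexadecimal string; a small illustrative helper (the name hash_to_hex is hypothetical, not part of the module):

import numpy as np

def hash_to_hex(aHash):
    # pack the flat boolean hash into bytes and render it as hex; assumes the
    # hash length is a multiple of 8 (e.g. 64 bits for hash_size=8)
    return np.packbits(aHash.astype(np.uint8)).tobytes().hex()

# e.g. hash_to_hex(phash_simple(image)) yields a 16-character hex string
# for the default 64-bit hash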
Example #5
def dhash(image, hash_size=8, bFlipHandling=False):
    """
    Difference Hash computation.

    following http://www.hackerfactor.com/blog/index.php?/archives/529-Kind-of-Like-That.html

    computes differences horizontally

    @image must be a PIL instance.
    """
    # resize(w, h), but numpy.array((h, w))
    if hash_size < 1:
        raise ValueError("Hash size must be positive")

    # fliphandling
    if bFlipHandling:
        image = fliphandling.handle_flip(image)

    image = __convert_image_to_grayscale(image)
    pixels = __resize_image_downscale(image, hash_size + 1, hash_size)
    # compute differences between columns
    diff = pixels[:, 1:] > pixels[:, :-1]
    return diff.flatten()
Example #6
def wuhash(aInputImage,
           lMaxSideSize=500,
           bRotationHandling=False,
           bFlipHandling=False,
           bDebug=False):

    # fliphandling
    if bFlipHandling:
        aInputImage = fliphandling.handle_flip(aInputImage)

    if bDebug:
        import matplotlib.pyplot as plt
        fig, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(
            3, 2, figsize=(10, 15))

    # handling of too big images - custom addition - not part of the algorithm
    lImgHeight = aInputImage.shape[0]
    lImgWidth = aInputImage.shape[1]
    if lImgHeight > lMaxSideSize or lImgWidth > lMaxSideSize:
        # print("input image too big for radon transform, performing downscale")
        dScaleFactor = lMaxSideSize / max(lImgHeight, lImgWidth)
        aInputImage = __downscale_image(aInputImage, dScaleFactor=dScaleFactor)

    aGrayScaleImage = __convert_image_to_grayscale(aInputImage)

    # ----- step 1 :: Radon transform
    aTheta = np.arange(180)
    aSinogram = radon(aGrayScaleImage, theta=aTheta, circle=False)
    aSinogram = __remove_zero_rows(aSinogram)

    if bDebug:
        ax1.set_title("Sinogram")
        ax1.imshow(aSinogram, cmap=plt.cm.Greys_r, aspect='auto')
        ax1.set_xlabel(r"$\theta$")
        ax1.set_ylabel(r"$\rho$")
        ax1.set_xticks(np.arange(0, 181, 15))

    # --- rotation handling
    if bRotationHandling:
        aSinogram = __rotation_handling(aSinogram)

    if bDebug:
        ax2.set_title("Sinogram rotated")
        ax2.imshow(aSinogram, cmap=plt.cm.Greys_r, aspect='auto')
        ax2.set_xlabel(r"$\theta$")
        ax2.set_ylabel(r"$\rho$")
        ax2.set_xticks(np.arange(0, 181, 15))

    # ------ step 2 :: 800 Blocks - mean value

    aMeanBlocks = __resize_image(aSinogram, 20, 40)
    # Note: this is not the original implementation, but it is very close to it

    if bDebug:
        ax3.set_title("Mean Blocks")
        ax3.imshow(aMeanBlocks, cmap=plt.cm.Greys_r, aspect='auto')

    # ---------- step 3 :: 2-level Haar wavelet transform
    aWaveletHeightFrequencies = np.zeros((20, 20))

    for nr_col in range(aMeanBlocks.shape[1]):
        # take high frequency part only
        aWaveletHeightFrequencies[:, nr_col] = pywt.wavedec(
            aMeanBlocks[:, nr_col], "haar", level=2)[2]

    if bDebug:
        ax4.set_title("Wavelet\n(high frequency components)")
        ax4.imshow(aWaveletHeightFrequencies,
                   cmap=plt.cm.Greys_r,
                   aspect='auto')

    # ----- step 4 :: FFT real part
    aFFT = np.zeros(aWaveletHeightFrequencies.shape)
    for nr_col in range(aWaveletHeightFrequencies.shape[1]):
        aFFT[:, nr_col] = np.real(
            np.fft.fft(aWaveletHeightFrequencies[:, nr_col]))

    if bDebug:
        ax5.set_title("FFT\n(reals components)")
        ax5.imshow(aFFT, cmap=plt.cm.Greys_r, aspect='auto')

    # ----- step 5 :: calculate mean and hash

    lFFTHeight, lFFTWidth = aFFT.shape
    aHashBlock = np.zeros(aFFT.shape, dtype=bool)

    #dMeanThreshold = np.mean(aFFT)
    for nr_col in range(lFFTWidth):
        dMeanThreshold = np.mean(aFFT[:, nr_col])
        for nr_row in range(lFFTHeight):
            if aFFT[nr_row, nr_col] >= dMeanThreshold:
                aHashBlock[nr_row, nr_col] = 1

    if bDebug:
        ax6.set_title("Hash")
        ax6.imshow(aHashBlock, cmap=plt.cm.Greys_r, aspect='auto')

    if bDebug:
        fig.tight_layout()
        # save before show(), otherwise the figure may already be cleared
        fig.savefig("test_wu_hash.png")
        plt.show()

    # return the hash flattened columnwise
    return aHashBlock.flatten('F')
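
The helper __remove_zero_rows used after the Radon transform is not shown; its apparent purpose is to drop the all-zero padding rows of the sinogram before the block averaging. A minimal sketch under that assumption:

import numpy as np

def __remove_zero_rows(aSinogram):
    # assumed behaviour: keep only rows that contain at least one
    # non-zero projection value
    return aSinogram[~np.all(aSinogram == 0, axis=1)]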
Example #7
def whash(image,
          hash_size=8,
          image_scale=None,
          mode='haar',
          remove_max_haar_ll=True,
          bFlipHandling=False):
    """
    Wavelet Hash computation.

    based on https://www.kaggle.com/c/avito-duplicate-ads-detection/

    @image must be a PIL instance.
    @hash_size must be a power of 2 and less than @image_scale.
    @image_scale must be a power of 2 and less than the image size. By default it is
            equal to the maximum power of 2 for the input image.
    @mode (see modes in the pywt library):
            'haar' - Haar wavelets, by default
            'db4' - Daubechies wavelets
    @remove_max_haar_ll - remove the lowest low level (LL) frequency using the Haar wavelet.
    """
    # fliphandling
    if bFlipHandling:
        image = fliphandling.handle_flip(image)

    import pywt
    if image_scale is not None:
        assert image_scale & (image_scale -
                              1) == 0, "image_scale is not power of 2"
    else:
        # TODO: correct translation? vvvv
        image_natural_scale = 2**int(numpy.log2(min(image.shape[:2])))
        image_scale = max(image_natural_scale, hash_size)

    ll_max_level = int(numpy.log2(image_scale))

    level = int(numpy.log2(hash_size))
    assert hash_size & (hash_size - 1) == 0, "hash_size is not power of 2"
    assert level <= ll_max_level, "hash_size in a wrong range"
    dwt_level = ll_max_level - level

    image = __convert_image_to_grayscale(image)
    pixels = __resize_image_downscale(image, image_scale, image_scale)
    pixels = pixels.astype(float)
    pixels /= 255

    # Remove the lowest low level frequency LL(max_ll) with a Haar filter if
    # @remove_max_haar_ll is set
    if remove_max_haar_ll:
        coeffs = pywt.wavedec2(pixels, 'haar', level=ll_max_level)
        coeffs = list(coeffs)
        coeffs[0] *= 0
        pixels = pywt.waverec2(coeffs, 'haar')

    # Use LL(K) as freq, where K is log2(@hash_size)
    coeffs = pywt.wavedec2(pixels, mode, level=dwt_level)
    dwt_low = coeffs[0]

    # Subtract median and compute hash
    med = numpy.median(dwt_low)
    diff = dwt_low > med
    return diff.flatten()
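
As the docstring notes, both hash_size and image_scale have to be powers of 2. A minimal usage sketch, assuming a PIL input and a placeholder file name:

from PIL import Image

image = Image.open("photo.jpg")  # placeholder path

# hash_size=8 and image_scale=64 are both powers of 2, as required
hash_bits = whash(image, hash_size=8, image_scale=64)

# with the default 'haar' mode this is an 8x8 = 64-bit boolean hash
print(hash_bits.size)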