# Shared requirements for the examples in this listing: cv2 (OpenCV), numpy,
# skimage.morphology.skeletonize, and the project-local helpers
# image_enhance.image_enhance and removedot (their modules are not shown here).
def get_descriptors(img, imageName, database):
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    img = clahe.apply(img)
    img = image_enhance.image_enhance(img)
    img = np.array(img, dtype=np.uint8)

    # Threshold
    ret, img = cv2.threshold(img, 127, 255,
                             cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)

    # Normalize to 0 and 1 range
    img[img == 255] = 1

    # Thinning
    skeleton = skeletonize(img)
    skeleton = np.array(skeleton, dtype=np.uint8)
    skeleton = removedot(skeleton)

    # Split the (assumed 399x273) image into 133x91 blocks and compute a HOG
    # descriptor for each block
    DescriptorList = list()
    for i in range(0, 399, 133):
        for j in range(0, 273, 91):
            blockImg = img[i:i + 133, j:j + 91]
            hog = cv2.HOGDescriptor()
            hog_des = hog.compute(blockImg)
            DescriptorList.append(hog_des)

    database.update({imageName: DescriptorList})
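A minimal enrollment sketch for the variant above (not part of the original listing): the folder, file names, and pickle path are hypothetical; the function fills `database`, which can then be pickled as the comments in the later block-based examples describe.

import glob
import pickle

import cv2

database = {}
for path in glob.glob("enroll/*.png"):               # hypothetical image folder
    gray = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    get_descriptors(gray, path, database)            # stores the per-block HOG descriptors

with open("descriptors.pkl", "wb") as f:             # hypothetical output file
    pickle.dump(database, f)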
Example 2
def get_descriptors(img):
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    img = clahe.apply(img)
    img = image_enhance.image_enhance(img)
    img = numpy.array(img, dtype=numpy.uint8)
    # Threshold
    ret, img = cv2.threshold(img, 127, 255,
                             cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
    # Normalize to 0 and 1 range
    img[img == 255] = 1

    #Thinning
    skeleton = skeletonize(img)
    skeleton = numpy.array(skeleton, dtype=numpy.uint8)
    skeleton = removedot(skeleton)
    # Harris corners
    harris_corners = cv2.cornerHarris(img, 3, 3, 0.04)
    harris_normalized = cv2.normalize(harris_corners,
                                      None,
                                      0,
                                      255,
                                      norm_type=cv2.NORM_MINMAX,
                                      dtype=cv2.CV_32FC1)
    threshold_harris = 125
    # Extract keypoints
    keypoints = []
    for x in range(0, harris_normalized.shape[0]):
        for y in range(0, harris_normalized.shape[1]):
            if harris_normalized[x][y] > threshold_harris:
                keypoints.append(cv2.KeyPoint(y, x, 1))

    # Define descriptor
    orb = cv2.ORB_create()
    # Compute descriptors
    _, des = orb.compute(img, keypoints)
    return (keypoints, des)
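A minimal matching sketch for the variant above (an assumption, not taken from the source): ORB descriptors are binary strings, so a brute-force matcher with Hamming distance is the natural choice; the file names and the score threshold are illustrative only.

import cv2

img1 = cv2.imread("finger1.png", cv2.IMREAD_GRAYSCALE)    # hypothetical inputs
img2 = cv2.imread("finger2.png", cv2.IMREAD_GRAYSCALE)
kp1, des1 = get_descriptors(img1)
kp2, des2 = get_descriptors(img2)

bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)
score = sum(m.distance for m in matches) / len(matches)   # mean Hamming distance
print("match" if score < 33 else "no match")              # illustrative threshold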
Example 3
def get_descriptors(img):
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    img = clahe.apply(img)
    # Clean the image of noise using the image_enhance helper library
    img = image_enhance.image_enhance(img)
    img = numpy.array(img, dtype=numpy.uint8)
    # Choose the best threshold for the image (Otsu)
    ret, img = cv2.threshold(img, 127, 255,
                             cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
    img[img == 255] = 1

    # Skeletonize the fingerprint to get better ridge resolution
    skeleton = skeletonize(img)
    skeleton = numpy.array(skeleton, dtype=numpy.uint8)
    skeleton = removedot(skeleton)
    # Detect the critical points with the Harris corner detector
    harris_corners = cv2.cornerHarris(img, 3, 3, 0.04)
    harris_normalized = cv2.normalize(harris_corners,
                                      None,
                                      0,
                                      255,
                                      norm_type=cv2.NORM_MINMAX,
                                      dtype=cv2.CV_32FC1)
    threshold_harris = 125
    # Extract the keypoints
    keypoints = []
    for x in range(0, harris_normalized.shape[0]):
        for y in range(0, harris_normalized.shape[1]):
            if harris_normalized[x][y] > threshold_harris:
                keypoints.append(cv2.KeyPoint(y, x, 1))
    # Define a descriptor
    orb = cv2.ORB_create()
    # Compute the descriptors
    _, des = orb.compute(img, keypoints)
    return (keypoints, des)
Example 4
def get_descriptors(img, imageName, database):
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    img = clahe.apply(img)
    img = image_enhance.image_enhance(img)
    img = np.array(img, dtype=np.uint8)

    # Threshold
    ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)

    # Normalize to 0 and 1 range
    img[img == 255] = 1

    # Thinning
    skeleton = skeletonize(img)
    skeleton = np.array(skeleton, dtype=np.uint8)
    skeleton = removedot(skeleton)

    # Split the image into 200x137 blocks (4 blocks for an assumed 400x274 image),
    # then generate keypoints and descriptors for each block. The per-image list of
    # block descriptors is stored in a dictionary keyed by the image name, which is
    # later pickled.

    # List of per-block (PCA-reduced) descriptors for this image
    DescriptorList = list()

    for i in range(0, 400, 200):
        for j in range(0, 274, 137):
            blockImg = img[i:i + 200, j:j + 137]
            # Harris corners
            harris_corners = cv2.cornerHarris(blockImg, 3, 3, 0.04)
            harris_normalized = cv2.normalize(harris_corners, None, 0, 255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32FC1)
            threshold_harris = 125
            # Extract keypoints
            keypoints = []
            for x in range(0, harris_normalized.shape[0]):
                for y in range(0, harris_normalized.shape[1]):
                    if harris_normalized[x][y] > threshold_harris:
                        keypoints.append(cv2.KeyPoint(y, x, 1))
            # Define descriptor
            orb = cv2.ORB_create()
            # Compute descriptors
            _, des = orb.compute(blockImg, keypoints)

            # Project the 32-dimensional ORB block descriptors down to 2 dimensions
            # (assuming a scikit-learn style PCA with fit_transform)
            pca = PCA(2)
            Reduced_des = pca.fit_transform(des)
            DescriptorList.append(Reduced_des)

    database.update({imageName: DescriptorList})
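A sketch of how two entries of this block-wise database could be compared (an assumption, not taken from the source): the PCA-reduced descriptors of corresponding blocks are matched with an L2 brute-force matcher and the match distances are averaged; the dictionary keys are hypothetical.

import cv2
import numpy as np

def block_score(blocks_a, blocks_b):
    # Lower scores mean more similar fingerprints (assumed scoring rule)
    bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
    distances = []
    for da, db in zip(blocks_a, blocks_b):
        matches = bf.match(np.float32(da), np.float32(db))
        distances.extend(m.distance for m in matches)
    return sum(distances) / len(distances) if distances else float("inf")

score = block_score(database["101_1.tif"], database["101_2.tif"])  # hypothetical keys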
Example 5
def get_descriptors(img):
    #CLAHE=Contrast Limited Adaptive Histogram Equalization
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    img = clahe.apply(img)
    img = image_enhance.image_enhance(img)
    img = numpy.array(img, dtype=numpy.uint8)
    # Threshold
    ret, img = cv2.threshold(img, 127, 255,
                             cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # Normalize to 0 and 1 range
    img[img == 255] = 1
    img_inv = cv2.bitwise_not(img)

    #Thinning
    skeleton = skeletonize(img)
    skeleton = removedot(skeleton)
    skeletonF32 = numpy.float32(skeleton)

    # Harris corners= combination of edge and corner detector
    harris_corners = cv2.cornerHarris(skeletonF32, 3, 3, 0.04)
    harris_normalized = cv2.normalize(harris_corners,
                                      None,
                                      0,
                                      255,
                                      norm_type=cv2.NORM_MINMAX,
                                      dtype=cv2.CV_32FC1)
    threshold_harris = 125

    # Extract keypoints
    keypoints = []
    for x in range(0, harris_normalized.shape[0]):
        for y in range(0, harris_normalized.shape[1]):
            if harris_normalized[x][y] > threshold_harris:
                keypoints.append(cv2.KeyPoint(y, x, 1))

    # Define descriptor
    orb = cv2.ORB_create()
    # Compute descriptors
    _, des = orb.compute(img, keypoints)

    return (skeleton, keypoints, des)
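A small visualization sketch for the variant above (assumed usage; input and output file names are hypothetical): overlay the detected keypoints on the thinned skeleton that the function returns.

import cv2
import numpy as np

img = cv2.imread("finger.png", cv2.IMREAD_GRAYSCALE)       # hypothetical input
skeleton, keypoints, des = get_descriptors(img)
vis = cv2.drawKeypoints(np.uint8(skeleton) * 255, keypoints, outImage=None)
cv2.imwrite("skeleton_keypoints.png", vis)                  # hypothetical output path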
Example 6
def get_descriptors(img):
    i = random.randint(0, 1000)
    # Apply Clahe
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    img = clahe.apply(img)
    img1 = img  # CLAHE output is already in the 0-255 range
    a = "CLAHE" + str(i)
    # cv2.imshow("Clahe",img)
    cv2.imwrite(f"./static/img/{a}.png", img1)

    # Enhance image
    img = image_enhance.image_enhance(img)
    img = numpy.array(img, dtype=numpy.uint8)
    # cv2.imshow("enhance",255*img)
    img2 = img * 255
    b = "Enhance" + str(i)
    cv2.imwrite(f"./static/img/{b}.png", img2)

    # Threshold
    ret, img = cv2.threshold(img, 127, 255,
                             cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
    # Normalize to 0 and 1 range
    img[img == 255] = 1
    img3 = img * 255
    c = "Normalize" + str(i)
    # cv2.imshow("Threshold",img)
    cv2.imwrite(f"./static/img/{c}.png", img3)

    #  plot_figure(img1,img2,img3,a,b,c)

    #Skeleton & Thinning
    skeleton = skeletonize(img)
    img1 = numpy.uint8(skeleton) * 255
    a = "Skeleton" + str(i)
    cv2.imwrite(f"./static/img/{a}.png", img1)
    skeleton = numpy.array(skeleton, dtype=numpy.uint8)
    #cv2.imshow("skeleton",skeleton)
    skeleton = removedot(skeleton)
    img2 = skeleton * 255
    b = "Thinning" + str(i)
    #cv2.imwrite(f"./static/img/{b}.png",img2)
    # Harris corners
    harris_corners = cv2.cornerHarris(img, 3, 3, 0.04)
    harris_normalized = cv2.normalize(harris_corners,
                                      None,
                                      0,
                                      255,
                                      norm_type=cv2.NORM_MINMAX,
                                      dtype=cv2.CV_32FC1)
    threshold_harris = 125

    # Extract keypoints
    keypoints = []
    for x in range(0, harris_normalized.shape[0]):
        for y in range(0, harris_normalized.shape[1]):
            if harris_normalized[x][y] > threshold_harris:
                #print(x,y)
                keypoints.append(cv2.KeyPoint(y, x, 1))
    img3 = cv2.drawKeypoints(img, keypoints, outImage=None)
    c = "Keypoints" + str(i)
    #  plot_figure(img1,img2,img3,a,b,c)
    cv2.imwrite(f"./static/img/{c}.png", img3)

    # Define descriptor
    orb = cv2.ORB_create()
    # Compute descriptors
    _, des = orb.compute(img, keypoints)
    return (keypoints, des)
Example 7
def get_descriptors(img, imageName, database, KeypointsLength):
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    img = clahe.apply(img)
    img = image_enhance.image_enhance(img)
    img = np.array(img, dtype=np.uint8)

    # Threshold
    ret, img = cv2.threshold(img, 127, 255,
                             cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)

    # Normalize to 0 and 1 range
    img[img == 255] = 1

    # Thinning
    skeleton = skeletonize(img)
    skeleton = np.array(skeleton, dtype=np.uint8)
    skeleton = removedot(skeleton)

    # Split the image into 133x91 blocks (9 blocks for an assumed 399x273 image),
    # then generate keypoints and descriptors for each block. The per-image lists are
    # meant to be stored in a dictionary keyed by the image name and pickled.

    # List of per-block ORB descriptors for this image
    DescriptorList = list()

    imageiter = 1
    InnerList = list()
    for i in range(0, 399, 133):

        for j in range(0, 273, 91):
            blockImg = img[i:i + 133, j:j + 91]

            # Harris corners
            harris_corners = cv2.cornerHarris(blockImg, 3, 3, 0.04)
            harris_normalized = cv2.normalize(harris_corners,
                                              None,
                                              0,
                                              255,
                                              norm_type=cv2.NORM_MINMAX,
                                              dtype=cv2.CV_32FC1)
            threshold_harris = 125
            # Extract keypoints
            KeypointsList = []
            blockKeypoints = list()
            for x in range(0, harris_normalized.shape[0]):
                for y in range(0, harris_normalized.shape[1]):
                    if harris_normalized[x][y] > threshold_harris:
                        blockKeypoints.append(cv2.KeyPoint(y, x, 1))
            # Define descriptor
            orb = cv2.ORB_create()
            # Compute descriptors
            _, blockDescriptors = orb.compute(blockImg, blockKeypoints)

            KeypointsList.append(blockKeypoints)
            DescriptorList.append(blockDescriptors)
            os.chdir(r"E:\ML\Fingerprint ML Project\Fingerprint\Image_Blocks")
            blockImg[blockImg == 1] = 255

            InnerList.append(len(blockKeypoints))

            if imageiter == 5:
                Kimg = cv2.drawKeypoints(blockImg,
                                         blockKeypoints,
                                         outImage=None)
                return Kimg

            imageiter += 1

    KeypointsLength.append(InnerList)
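A usage sketch for the block-wise variant above (an assumption, not taken from the source): since the function returns the keypoint visualization of its fifth block, the caller mostly receives that image; `database` and `KeypointsLength` are only filled on runs that never reach the early return.

import cv2

database, keypoints_length = {}, []                          # hypothetical containers
gray = cv2.imread("probe.png", cv2.IMREAD_GRAYSCALE)         # hypothetical input
block_vis = get_descriptors(gray, "probe.png", database, keypoints_length)
if block_vis is not None:
    cv2.imwrite("block5_keypoints.png", block_vis)           # hypothetical output path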