Example #1
	def __init__(self):
		self.surf = cv2.xfeatures2d.SURF_create()
		FLANN_INDEX_KDTREE = 0
		index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
		search_params = dict(checks=50)
		self.flann = cv2.FlannBasedMatcher(index_params, search_params)
Example #2
 def set_matcher(self):
     FLANN_INDEX_KDTREE = 0
     index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
     search_params = dict(checks=50)
     self.matcher = cv2.FlannBasedMatcher(index_params, search_params)
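# Note: in FLANN's own enum FLANN_INDEX_KDTREE is 1 (0 is FLANN_INDEX_LINEAR), but the cv2
# Python bindings do not expose these constants, which is why the examples on this page
# hard-code the value. A minimal KD-tree matcher setup would look roughly like this
# (a generic sketch, not taken from any particular example above):
import cv2

FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)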
Example #3
import math
import numpy as np
from tqdm import tqdm
import cv2
import time
from geometry_msgs.msg import Twist, Vector3, Pose
from nav_msgs.msg import Odometry
from sensor_msgs.msg import Image, CompressedImage
from cv_bridge import CvBridge, CvBridgeError
import smach
import smach_ros

MIN_MATCH_COUNT = 20
detector = cv2.xfeatures2d.SIFT_create()

FLANN_INDEX_KDTREE = 0
flannParam = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
flann = cv2.FlannBasedMatcher(flannParam, {})

trainImg = cv2.imread("soldadinho.jpg", 0)

#cv2.imshow('teste',trainImg)

trainKP, trainDesc = detector.detectAndCompute(trainImg, None)

#cv2.waitKey(0)


def auto_canny(image, sigma=0.33):
    # compute the median of the single channel pixel intensities
    v = np.median(image)

    # apply automatic Canny edge detection using the computed median
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    return cv2.Canny(image, lower, upper)
kp1 = detector.detect(i1)
kp2 = detector.detect(i2)
print("Keypoints:", len(kp1), len(kp2))

kp1, des1 = detector.compute(i1, kp1)
kp2, des2 = detector.compute(i2, kp2)
print("Descriptors:", len(des1), len(des2))

FLANN_INDEX_KDTREE = 1
flann_params = {
    'algorithm': FLANN_INDEX_KDTREE,
    'trees': 5
}
#search_params = dict(checks=50)
search_params = dict()
matcher = cv2.FlannBasedMatcher(flann_params, search_params)
matches = matcher.knnMatch(des1,des2,k=5)
print("Raw matches:", len(matches))

filt_matches = []
for i, m in enumerate(tqdm(matches)):
    min_index = 0
    min_value = 99999999999999999999999.9
    for j in range(len(m)):
        p1 = np.array(kp1[m[j].queryIdx].pt)
        p2 = np.array(kp2[m[j].trainIdx].pt)
        px_dist = np.linalg.norm(p1-p2)
        px_dist = 1
        a1 = np.array(kp1[m[j].queryIdx].angle)
        a2 = np.array(kp2[m[j].trainIdx].angle)
        angle = (a1-a2+180) % 360 - 180
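        # Note: (a1 - a2 + 180) % 360 - 180 wraps the keypoint angle difference into [-180, 180)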
Example #5
def recebeImagem(msg):
    img = converteImagem(msg)

    orb = cv.ORB_create(nfeatures=1000)

    kp1, des1 = orb.detectAndCompute(base, None)

    hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)

    kerno = np.ones((5,5),"uint8")

    # Filter the yellow color
    min_amarelo = np.array([20,245,245], np.uint8)
    max_amarelo = np.array([40,255,255], np.uint8)
    masc_amarelo = cv.inRange(hsv,min_amarelo,max_amarelo)
    masc_amarelo = cv.dilate(masc_amarelo,kerno)
    res_amarelo = cv.bitwise_and(img,img,mask=masc_amarelo)

    # Filter the blue color
    min_azul = np.array([99,50,50], np.uint8)
    max_azul = np.array([119,255,255], np.uint8)
    masc_azul = cv.inRange(hsv,min_azul,max_azul)
    masc_azul = cv.dilate(masc_azul,kerno)
    masc_azul = cv.dilate(masc_azul,kerno)
    res_azul = cv.bitwise_and(img,img,mask=masc_azul)

    kp2, des2 = orb.detectAndCompute(res_azul, None)

    FLANN_INDEX_LSH = 6
    index_params = dict(algorithm = FLANN_INDEX_LSH, table_number = 6, key_size = 12, multi_probe_level = 1)
    search_params = dict(checks = 100)

    flann = cv.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(des1,des2,k=2)

    good = []
    for i,m_n in enumerate(matches):
        if len(m_n) != 2:
            continue
        (m,n) = m_n
        if m.distance < 0.7*n.distance:
            good.append(m)

    if(len(good)<5):
        return

    # Compute the base points in the image
    pontoImagem = []

    pontoReal = []

    #train = kp2
    #query = kp1

    for match in good:
        point_base = kp1[match.queryIdx].pt
        point_photo = kp2[match.trainIdx].pt

        # Shift the point into a coordinate frame centered on the base
        point_baseMetro = [point_base[0]-centro_base[0],point_base[1]-centro_base[1], 0]

        # Convert to meters
        point_baseMetro[0] = point_baseMetro[0] * escala 
        point_baseMetro[1] = point_baseMetro[1] * escala 

        # Append to the lists
        pontoImagem.append([point_photo[0],point_photo[1]])
        pontoReal.append(point_baseMetro)

    pontoImagem = np.array(pontoImagem, dtype=np.float32)
    pontoReal = np.array(pontoReal,dtype=np.float32)

    a, RObj, tObj, _ = cv.solvePnPRansac(pontoReal,pontoImagem , K, np.zeros((5,1)))

    if not a:
        return

    RObj, _ = cv.Rodrigues(RObj)

    RCamera, tCamera = inverteTransformacao(RObj, tObj)

    publicaBase(RCamera, tCamera)

    rospy.loginfo("Found "+str(len(good))+" matches")
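# The helper inverteTransformacao() called above is not shown in this excerpt; a plausible
# sketch of such a rigid-transform inversion (an assumption, not the author's code):
import numpy as np

def inverteTransformacao(R, t):
    # If p_cam = R @ p_obj + t, the inverse pose satisfies p_obj = R.T @ p_cam - R.T @ t
    R_inv = R.T
    t_inv = -R_inv @ np.asarray(t).reshape(3, 1)
    return R_inv, t_inv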
Example #6
    def calculate_offset(self, img1, img2, orientation):

        if (orientation == "vertical"):
            # Calculate rough overlap in pixels
            overlap_px = img2.shape[0] * CONFIG['overlap']
            # Convert images to grayscale and reduce size to scale_factor
            i1 = cv2.cvtColor(
                cv2.resize(img1[-int(overlap_px):, :, :], (0, 0),
                           fx=CONFIG['scale_factor'],
                           fy=CONFIG['scale_factor']), cv2.COLOR_BGR2GRAY)
            i2 = cv2.cvtColor(
                cv2.resize(img2[:int(overlap_px), :, :], (0, 0),
                           fx=CONFIG['scale_factor'],
                           fy=CONFIG['scale_factor']), cv2.COLOR_BGR2GRAY)
        else:
            overlap_px = img2.shape[1] * CONFIG['overlap']
            # Convert images to grayscale and reduce size to scale_factor
            i1 = cv2.cvtColor(
                cv2.resize(img1[:, -int(overlap_px):, :], (0, 0),
                           fx=CONFIG['scale_factor'],
                           fy=CONFIG['scale_factor']), cv2.COLOR_BGR2GRAY)
            i2 = cv2.cvtColor(
                cv2.resize(img2[:, :int(overlap_px), :], (0, 0),
                           fx=CONFIG['scale_factor'],
                           fy=CONFIG['scale_factor']), cv2.COLOR_BGR2GRAY)

        # Find SIFT keypoints and descriptors
        sift = cv2.xfeatures2d.SIFT_create(nfeatures=CONFIG['max_features'])
        print('\t- Finding keypoints and descriptors for image 1')
        kp1, des1 = sift.detectAndCompute(i1, None)
        print('\t- Finding keypoints and descriptors for image 2')
        kp2, des2 = sift.detectAndCompute(i2, None)

        # Use FLANN to determine matches
        print('\t- Finding matches')

        # As of 10/11/2016, Flann is broken on binary builds of opencv for windows.  Fall back to BF in those cases.
        if (platform.system() != 'Windows'):
            flann = cv2.FlannBasedMatcher({
                'algorithm': 0,
                'trees': 5
            }, {'checks': CONFIG['flann_checks']})
            matches = flann.knnMatch(des1, des2, k=2)
        else:
            bfMatch = cv2.BFMatcher(cv2.NORM_L2)
            matches = bfMatch.knnMatch(des1, des2, k=2)

        # Limit to reasonable matches
        good_matches = [m for m, n in matches if m.distance < 0.7 * n.distance]
        src_pts = np.float32([
            kp1[match.queryIdx].pt for match in good_matches
        ]).reshape(-1, 1, 2)
        dst_pts = np.float32([
            kp2[match.trainIdx].pt for match in good_matches
        ]).reshape(-1, 1, 2)

        # We're not doing any robust analyses of outliers, so let's just take the median and see how it works
        x_offset = int(
            np.median([elem[0][0] for elem in np.subtract(src_pts, dst_pts)]))
        y_offset = int(
            np.median([elem[0][1] for elem in np.subtract(src_pts, dst_pts)]))
        # Rescale offset for original size and return
        print('\t- X Offset found: {} px'.format(x_offset *
                                                 (1 / CONFIG['scale_factor'])))
        print('\t- Y Offset found: {} px'.format(y_offset *
                                                 (1 / CONFIG['scale_factor'])))
        return (x_offset * (1 / CONFIG['scale_factor']),
                y_offset * (1 / CONFIG['scale_factor']))
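# calculate_offset() above reads a module-level CONFIG dict; the keys it uses are collected
# below with illustrative placeholder values (assumptions, not the author's settings):
CONFIG = {
    'overlap': 0.3,         # fraction of each image expected to overlap its neighbour
    'scale_factor': 0.5,    # downscale factor applied before feature matching
    'max_features': 5000,   # nfeatures passed to SIFT_create
    'flann_checks': 50,     # FLANN search_params checks
}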
Example #7
# sift = cv2.xfeatures2d_SIFT_create()
# print('OpenCV version {}'.format(cv2.__version__))
sift = cv2.xfeatures2d.SIFT_create()  # Not available in the free version
# print('Features {}'.format(sift))
# The kp variables hold the keypoints where the features are located
kp1, descripcion1 = sift.detectAndCompute(cereal, None)
kp2, descripcion2 = sift.detectAndCompute(cereales, None)

# Matching of the images
# Create a dictionary with 2 parameters
indice = dict(algorithm=0, trees=5)
# Create a dictionary for the search
busqueda = dict(checks=50)
# FLANN is used to find the matches between the 2 images
flan = cv2.FlannBasedMatcher(indice, busqueda)
# Match the descriptors of the 2 images
emparejamientos = flan.knnMatch(descripcion1, descripcion2, k=2)
print('The matching keypoints are {}'.format(emparejamientos))

# Look for the matches that coincide the most, i.e. those with the smallest distance between them
mejores = []
for emparejamiento1, emparejamiento2 in emparejamientos:
    if emparejamiento1.distance < 0.7 * emparejamiento2.distance:
        mejores.append([emparejamiento1])

# Draw the match lines on the image
imagen_emparejada = cv2.drawMatchesKnn(cereal,
                                       kp1,
                                       cereales,
                                       kp2,
                                       mejores,
                                       None,
                                       flags=2)
Example #8
def setflann(kdtree=0, trees=5, checks=50):
    index_params = dict(algorithm=kdtree, trees=trees)
    search_params = dict(checks=checks)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    return flann
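# Usage sketch for setflann() with SIFT descriptors and Lowe's ratio test
# ("query.jpg"/"train.jpg" are placeholder paths, not part of the example):
import cv2

img1 = cv2.imread("query.jpg", 0)
img2 = cv2.imread("train.jpg", 0)
sift = cv2.SIFT_create()  # cv2.xfeatures2d.SIFT_create() on older contrib builds
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
flann = setflann()
matches = flann.knnMatch(des1, des2, k=2)
good = [m for m, n in matches if m.distance < 0.7 * n.distance]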
Example #9
def find_show(src, dst):
    img1 = cv2.imread(dst, 0)
    img2 = cv2.imread(src, 0)

    # the larger edgeThreshold is, the more sift keypoints we find 
    sift = cv2.SIFT(edgeThreshold=100)

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    # FLANN parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)   # or pass empty dictionary

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(des1, des2, k=2)

    print len(matches), "sift feature points found"

    good = []
    for m, n in matches:
        # print m.distance, n.distance, m.distance / n.distance
        # filter those pts similar to the next good ones
        if m.distance < 0.9 * n.distance:
            good.append(m)
    print len(good), "good feature points"

    # require count >= 4 in function cvFindHomography
    if len(good) >= 4:
        sch_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        img_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        # M is the transformation matrix
        M, mask = cv2.findHomography(sch_pts, img_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()

        # Compute the coordinates of the four corners after the transformation, i.e. their coordinates in the large image
        h, w = img1.shape[:2]
        pts = np.float32(
            [[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)

        # trans numpy array to python list
        # [(a, b), (a1, b1), ...]
        pypts = []
        for npt in dst.astype(int).tolist():
            pypts.append(tuple(npt[0]))

        lt, br = pypts[0], pypts[2]
        middle_point = (lt[0] + br[0]) / 2, (lt[1] + br[1]) / 2

        result = dict(
            result=middle_point,
            rectangle=pypts,
            confidence=min(1.0 * matchesMask.count(1) / 10, 1.0)
        )
        print result

        selected = []
        for k, v in enumerate(good):
            if matchesMask[k]:
                selected.append(v)
        print len(selected), "selected by homography"

    else:
        raise Exception("not enough matches found %s/%s" % (len(good), 4))

    # all sift feature points
    drawMatches(img1, kp1, img2, kp2, [i[0] for i in matches])
    # good feature points, good one distance/next good one distance <= 90%
    drawMatches(img1, kp1, img2, kp2, good)
    # select points by homography, those of same matrix transformation 
    drawMatches(img1, kp1, img2, kp2, selected)
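# Note: this example targets the old OpenCV 2.x / Python 2 API (cv2.SIFT, print statements)
# and calls a drawMatches() helper, presumably user-defined, since the 2.x Python bindings
# did not expose one. On OpenCV 3+ the rough built-in equivalent would be:
#   vis = cv2.drawMatches(img1, kp1, img2, kp2, good, None, flags=2)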
Example #10
    def __detect_contour(self, matcher, min_match_count, dst_threshold,
                         n_features, neighbours, rc_threshold):
        """
        The method provides detection of the field where the logo must be inserted

        :param matcher:  tunes the Matcher object
        :param min_match_count:  the minimum quantity of matched keypoints between frame and logo to detect the field
        :param dst_threshold: the threshold for the distance between matched descriptors
        :param n_features: the number of features for the SIFT algorithm
        :param neighbours: the amount of best matches found per each query descriptor
        :param rc_threshold: the threshold for the Homographies mask
        :return: switch that indicates whether the required field was found or not; the cropped frame containing
        the field; the cropped frame in hsv mode; the cropped frame corner coordinates [x_min, x_max, y_min, y_max]
        """
        gray_frame = cv.cvtColor(self.frame, cv.COLOR_BGR2GRAY)
        self.template = cv.imread(self.template)
        gray_template = cv.cvtColor(self.template, cv.COLOR_BGR2GRAY)

        sift = cv.xfeatures2d.SIFT_create(n_features)

        kp1, des1 = sift.detectAndCompute(gray_template, None)
        kp2, des2 = sift.detectAndCompute(gray_frame, None)

        index_params = {
            'algorithm': matcher['index_params'][0],
            'trees': matcher['index_params'][1]
        }
        search_params = {'checks': matcher['search_params']}
        flann = cv.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=neighbours)

        good = []
        for m, n in matches:
            if m.distance < dst_threshold * n.distance:
                good.append(m)

        cr_frame = None
        frame_hsv = None
        min_max = None
        if len(good) >= min_match_count:
            switch = True
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good])
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good])
            m, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC,
                                        rc_threshold)
            h, w = gray_template.shape
            pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                              [w - 1, 0]]).reshape(-1, 1, 2)
            dst = cv.perspectiveTransform(pts, m)
            x_corner_list = [dst[i][0][0] for i in range(len(dst))]
            y_corner_list = [dst[j][0][1] for j in range(len(dst))]
            x_min, x_max = np.int64(min(x_corner_list)), np.int64(
                max(x_corner_list))
            y_min, y_max = np.int64(min(y_corner_list)), np.int64(
                max(y_corner_list))
            min_max = [x_min, x_max, y_min, y_max]
            cr_frame = self.frame[y_min:y_max, x_min:x_max]
            frame_hsv = cv.cvtColor(cr_frame, cv.COLOR_BGR2HSV)
        else:
            switch = False
        return switch, cr_frame, frame_hsv, min_max
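# __detect_contour() above expects its `matcher` argument as a small dict; its shape can be
# inferred from the indexing in the method (values below are illustrative assumptions):
matcher = {
    'index_params': (1, 5),   # (FLANN algorithm id, number of KD-trees)
    'search_params': 50,      # FLANN checks
}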
Example #11
def match(scn_img, src_img, filename, kp1, des1, kp2, des2, pos):

    #==============================================>FLANN MATCHING
    start_time = time.time()

    FLANN_INDEX_LSH = 0
    flann_params = dict(
        algorithm=FLANN_INDEX_LSH,
        table_number=6,  # 12
        key_size=12,  # 20
        multi_probe_level=1)  #2
    matcher = cv2.FlannBasedMatcher(flann_params, {})

    if (len(kp2) > 10):
        matches = matcher.knnMatch(np.asarray(des1, np.float32),
                                   np.asarray(des2, np.float32), 2)

        #==============================================>Ratio Test

        good = []
        for m, n in matches:
            if m.distance < 0.67 * n.distance:
                good.append(m)
        # print ("good matches",len(good))

        MIN_MATCH_COUNT = 10

        if len(good) > MIN_MATCH_COUNT:

            src_pts = np.float32([kp1[m.queryIdx].pt
                                  for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt
                                  for m in good]).reshape(-1, 1, 2)
            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
            try:
                matchesMask = mask.ravel().tolist()
                draw_params = dict(
                    matchColor=(0, 255, 0),  # draw matches in green color
                    singlePointColor=None,
                    matchesMask=matchesMask,  # draw only inliers
                    flags=2)

                posadd = str(filename) + " = " + str(len(good)) + " matches"
                # pos.append("      "+str(filename))
                pos.append(posadd)

                # img3=cv2.drawMatches(scn_img,kp1,src_img,kp2,good,None,**draw_params)
                # name=str(time.time())+".jpg"
                # cv2.imwrite("input1.jpg", img3)

                # h,w= scn_img.shape
                # pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
                # dst = cv2.perspectiveTransform(pts,M)
                # src_img = cv2.polylines(src_img,[np.int32(dst)],True,255,3, cv2.LINE_AA)
                #==============================================>DRAW KPT

                # savekeyPointsOnImage(src_img,"src.jpg" ,kp2,w2,h2)

                # img5 = cv2.drawKeypoints(src_img, kp2 ,None, color=(0,255,255))
                # cv2.imwrite('src.jpg', img5)
            except:
                print "       false +ve - " + str(filename)

        count = time.time() - start_time
        a.append(count)
        # print("Server Side--- %s seconds ---" % (count))

        # draw_params = dict(matchColor = (0,255,0), # draw matches in green color
        # singlePointColor = None,
        # matchesMask = matchesMask, # draw only inliers
        # flags = 2)

        # img3=cv2.drawMatches(scn_img,kp1,src_img,kp2,good,None,**draw_params)
        # plt.imshow(img3, 'gray'),plt.show()
    else:
        print "error - " + str(filename)

    return a

    # hello.append([filename,len(kp1),len(pos),pos])
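# Note: FLANN_INDEX_LSH is 6 in FLANN's enum, not 0 as the variable name above suggests;
# table_number, key_size and multi_probe_level only apply to the LSH index (binary
# descriptors such as ORB/BRIEF). With algorithm=0 and the descriptors cast to float32,
# those LSH parameters have no effect here.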
Example #12
 def descriptor_matcher(self):
     FLANN_INDEX_KDTREE = 0
     index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
     search_params = dict(checks=50)  # or pass empty dictionary
     return cv2.FlannBasedMatcher(index_params, search_params)
Example #13
def stitchImages(base_img_rgb, images_array, round):
    if (len(images_array) < 1):
        #print "Image array empty, ending stitchImages()"
        return base_img_rgb

    base_img = cv2.GaussianBlur(cv2.cvtColor(base_img_rgb, cv2.COLOR_BGR2GRAY),
                                (5, 5), 0)

    # Use the SURF feature detector
    detector = cv2.SURF()

    # Find key points in base image for motion estimation
    base_features, base_descs = detector.detectAndCompute(base_img, None)

    # Parameters for nearest-neighbor matching
    FLANN_INDEX_KDTREE = 1  # bug: flann enums are missing
    flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    matcher = cv2.FlannBasedMatcher(flann_params, {})

    ##print "Iterating through next images..."

    closestImage = None

    # TODO: Thread this loop since each iteration is independent

    # Find the best next image from the remaining images
    for index, next_img_rgb in enumerate(images_array):
        next_img = cv2.GaussianBlur(
            cv2.cvtColor(next_img_rgb, cv2.COLOR_BGR2GRAY), (5, 5), 0)

        #print "\t Finding points..."

        next_features, next_descs = detector.detectAndCompute(next_img, None)

        matches = matcher.knnMatch(next_descs,
                                   trainDescriptors=base_descs,
                                   k=2)
        #print "\t Match Count: ", len(matches)

        matches_subset = filter_matches(matches)
        #print "\t Filtered Match Count: ", len(matches_subset)

        distance = imageDistance(matches_subset)
        #print "\t Distance from Key Image: ", distance

        averagePointDistance = distance / float(len(matches_subset))
        #print "\t Average Distance: ", averagePointDistance

        kp1 = []
        kp2 = []

        for match in matches_subset:
            kp1.append(base_features[match.trainIdx])
            kp2.append(next_features[match.queryIdx])

        p1 = np.array([k.pt for k in kp1])
        p2 = np.array([k.pt for k in kp2])

        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
        #print '%d / %d  inliers/matched' % (np.sum(status), len(status))

        inlierRatio = float(np.sum(status)) / float(len(status))

        # if ( closestImage == None or averagePointDistance < closestImage['dist'] ):
        if closestImage == None or inlierRatio > closestImage['inliers']:
            closestImage = {}
            closestImage['h'] = H
            closestImage['inliers'] = inlierRatio
            closestImage['dist'] = averagePointDistance
            closestImage['index'] = index
            closestImage['rgb'] = next_img_rgb
            closestImage['img'] = next_img
            closestImage['feat'] = next_features
            closestImage['desc'] = next_descs
            closestImage['match'] = matches_subset

    #print "Closest Image Ratio: ", closestImage['inliers']

    new_images_array = images_array
    del new_images_array[closestImage[
        'index']]  # Shortening the images array to not have the last used image

    H = closestImage['h']
    if np.isnan(H[0]).any():
        print "ImageStitching(): Error: About to crash, H is nan"
    H = H / H[2, 2]
    H_inv = np.linalg.inv(H)

    if closestImage['inliers'] > 0.1:  # and
        (min_x, min_y, max_x, max_y) = findDimensions(closestImage['img'],
                                                      H_inv)

        # Adjust max_x and max_y by base img size
        max_x = max(max_x, base_img.shape[1])
        max_y = max(max_y, base_img.shape[0])

        move_h = np.matrix(np.identity(3), np.float32)

        if (min_x < 0):
            move_h[0, 2] += -min_x
            max_x += -min_x

        if (min_y < 0):
            move_h[1, 2] += -min_y
            max_y += -min_y

        #print "Homography: \n", H
        #print "Inverse Homography: \n", H_inv
        #print "Min Points: ", (min_x, min_y)

        mod_inv_h = move_h * H_inv

        if math.isnan(math.ceil(max_x)) or math.isnan(math.ceil(max_y)):
            # If program was unable to stitch this correctly, return the work so far instead of crashing.
            #print "Error: About to crash, max_x is NaN"
            return base_img_rgb

        img_w = int(math.ceil(max_x))
        img_h = int(math.ceil(max_y))

        #print "New Dimensions: ", (img_w, img_h)

        # Warp the new image given the homography from the old image
        base_img_warp = cv2.warpPerspective(base_img_rgb, move_h,
                                            (img_w, img_h))
        #print "Warped base image"

        # utils.showImage(base_img_warp, scale=(0.2, 0.2), timeout=5000)
        # cv2.destroyAllWindows()

        next_img_warp = cv2.warpPerspective(closestImage['rgb'], mod_inv_h,
                                            (img_w, img_h))
        #print "Warped next image"

        # utils.showImage(next_img_warp, scale=(0.2, 0.2), timeout=5000)
        # cv2.destroyAllWindows()

        # Put the base image on an enlarged palette
        enlarged_base_img = np.zeros((img_h, img_w, 3), np.uint8)

        #print "Enlarged Image Shape: ",  enlarged_base_img.shape
        #print "Base Image Shape: ",      base_img_rgb.shape
        #print "Base Image Warp Shape: ", base_img_warp.shape

        # enlarged_base_img[y:y+base_img_rgb.shape[0],x:x+base_img_rgb.shape[1]] = base_img_rgb
        # enlarged_base_img[:base_img_warp.shape[0],:base_img_warp.shape[1]] = base_img_warp

        # Create a mask from the warped image for constructing masked composite
        (ret, data_map) = cv2.threshold(
            cv2.cvtColor(next_img_warp, cv2.COLOR_BGR2GRAY), 0, 255,
            cv2.THRESH_BINARY)

        enlarged_base_img = cv2.add(enlarged_base_img,
                                    base_img_warp,
                                    mask=np.bitwise_not(data_map),
                                    dtype=cv2.CV_8U)

        # Now add the warped image
        final_img = cv2.add(enlarged_base_img, next_img_warp, dtype=cv2.CV_8U)

        # utils.showImage(final_img, scale=(0.2, 0.2), timeout=0)
        # cv2.destroyAllWindows()

        # Crop off the black edges
        final_gray = cv2.cvtColor(final_img, cv2.COLOR_BGR2GRAY)
        _, thresh = cv2.threshold(final_gray, 1, 255, cv2.THRESH_BINARY)
        contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_NONE)
        #print "Found %d contours..." % (len(contours))

        max_area = 0
        best_rect = (0, 0, 0, 0)

        for cnt in contours:
            x, y, w, h = cv2.boundingRect(cnt)
            # #print "Bounding Rectangle: ", (x,y,w,h)

            deltaHeight = h - y
            deltaWidth = w - x

            area = deltaHeight * deltaWidth

            if area > max_area and deltaHeight > 0 and deltaWidth > 0:
                max_area = area
                best_rect = (x, y, w, h)

        if (max_area > 0):
            #print "Maximum Contour: ", max_area
            #print "Best Rectangle: ", best_rect

            final_img_crop = final_img[best_rect[1]:best_rect[1] +
                                       best_rect[3],
                                       best_rect[0]:best_rect[0] +
                                       best_rect[2]]

            #utils.showImage(final_img_crop, scale=(0.2, 0.2), timeout=0)
            #cv2.destroyAllWindows()

            final_img = final_img_crop

        return stitchImages(final_img, new_images_array, round + 1)

    else:
        return stitchImages(base_img_rgb, new_images_array, round + 1)
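# stitchImages() relies on helpers not shown in this excerpt (filter_matches, imageDistance,
# findDimensions). Plausible sketches of the first two, based on how they are used above
# (assumptions, not the author's code):
def filter_matches(matches, ratio=0.75):
    # keep the best match of each knnMatch pair when it passes Lowe's ratio test
    return [m for m, n in matches if m.distance < ratio * n.distance]

def imageDistance(matches_subset):
    # total descriptor distance over the filtered matches
    return sum(m.distance for m in matches_subset)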
Example #14
    def find_screen_img(self, cam_img, screen_img=None, debug=False):
        """
        Find screen_img in cam_img.
        If executed successfully, the function returns True.
        Meanwhile self.recovery_matrix will be computed, which is used to
        map the camera image to a top view.
        """

        try:

            MATCH_THRESHOLD = 10
            FLANN_INDEX_KDTREE = 0
            AREA_THRESHOLD = 1000

            if screen_img is None:
                kp1, des1 = self._screen_features
                screen_img = self._screen_img
            else:
                kp1, des1 = self._detector.detectAndCompute(screen_img, None)

            kp2, des2 = self._detector.detectAndCompute(cam_img, None)

            index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
            search_params = dict(checks=50)
            flann = cv2.FlannBasedMatcher(index_params, search_params)

            matches = flann.knnMatch(des1, des2, k=2)

            # Perform Lowe's ratio test to select good points to proceed with.
            good = [m for m, n in matches if m.distance < 0.7 * n.distance]

            src_pts = np.float32([kp1[m.queryIdx].pt
                                  for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt
                                  for m in good]).reshape(-1, 1, 2)

            # check the property of the corners found out there
            self.screen2cam_matrix, mask = cv2.findHomography(
                src_pts, dst_pts, cv2.RANSAC, 5.0)
            matchesMask = mask.ravel().tolist()

            h, w = self._screen_img.shape[0:2]
            pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                              [w - 1, 0]]).reshape(-1, 1, 2)
            self._screen_corners = cv2.perspectiveTransform(
                pts, self.screen2cam_matrix)

            if debug:
                cv2.imshow(
                    'debug',
                    draw_match(screen_img,
                               kp1,
                               self.draw_screen_boundary(cam_img),
                               kp2,
                               good,
                               matchesMask=matchesMask))
            else:
                cv2.destroyWindow('debug')

            if False in [
                    cv2.isContourConvex(self._screen_corners),
                    cv2.contourArea(self._screen_corners) > AREA_THRESHOLD,
                    sum(matchesMask) > MATCH_THRESHOLD
            ]:

                self.screen2cam_matrix = None
                self._screen_corners = None

                return False

            self.cam2screen_matrix, _ = cv2.findHomography(
                dst_pts, src_pts, cv2.RANSAC, 5.0)

            return True

        except cv2.error:

            self.screen2cam_matrix = None
            self._screen_corners = None
            return False
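# Usage sketch (assumption): once find_screen_img() returns True, the recovered
# cam2screen_matrix can rectify the camera frame into the screen's coordinate frame:
#   h, w = screen_img.shape[:2]
#   top_view = cv2.warpPerspective(cam_img, finder.cam2screen_matrix, (w, h))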
Example #15
def recognize_from_image(files=['2.jpeg', '1.jpeg']):
    if len(files) < 2:
        return False
    origin_photo = files[0]
    MIN_MATCH_COUNT = 10
    img_colored = cv2.imread(files[0], cv2.IMREAD_COLOR)
    origin = cv2.imread(files[0], 0)  # trainImage

    # Preprocess the pictures to accelerate
    for item in files:
        preprocess(item)

    i = 1
    print('before loop')
    while i < len(files):
        print(files[i])
        img = cv2.imread(files[i], 0)  # queryImage

        # Initiate SIFT detector
        sift = cv2.xfeatures2d.SIFT_create()

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(img, None)
        kp2, des2 = sift.detectAndCompute(origin, None)

        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)

        # store all the good matches as per Lowe's ratio test.
        good = []
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)

        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([kp1[m.queryIdx].pt
                                  for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt
                                  for m in good]).reshape(-1, 1, 2)

            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
            matchesMask = mask.ravel().tolist()

            h, w = img.shape
            pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                              [w - 1, 0]]).reshape(-1, 1, 2)
            dst = cv2.perspectiveTransform(pts, M)

            origin = cv2.polylines(img_colored, [np.int32(dst)], True,
                                   (255, 0, 0), 7, cv2.LINE_AA)

        else:
            print("Not enough matches are found - %d/%d" %
                  (len(good), MIN_MATCH_COUNT))
            matchesMask = None

        i += 1

    plt.xticks([])
    plt.yticks([])
    plt.imshow(origin)
    plt.subplots_adjust(bottom=0)
    plt.subplots_adjust(top=1)
    plt.subplots_adjust(right=1)
    plt.subplots_adjust(left=0)
    plt.savefig('result.jpg')
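# Note: when a match is found, `origin` holds a BGR image (OpenCV channel order), so
# plt.imshow() renders it with red and blue swapped; converting with
# cv2.cvtColor(origin, cv2.COLOR_BGR2RGB) before plotting is the usual fix.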
def match_homography_excel_version(template,test):                    
     #%% Initial initializations
    
    ## Constant parameters to be tuned
    MIN_MATCH_COUNT = 30 # keep searching for the template only while there are at least
                         # MIN_MATCH_COUNT good matches left in the scene
    MIN_MATCH_CURRENT = 5 # stop when the matched homography has fewer than this many features
    LOWE_THRESHOLD = 0.8 # a match is kept only if the distance with the closest
                         # match is lower than LOWE_THRESHOLD * the distance with
                         # the second best match
    IN_POLYGON_THRESHOLD = 0.95 # homography kept only if at least this fraction
                                # of inliers are in the polygon
    OUT_OF_IMAGE_THRESHOLD = 0.1 # homography kept only if the projected square does not
                                 # lie too far outside the test image
    #ALPHA=0.9999999999999 # this constant allows us to determine the quantiles
                          # to be used to discriminate areas
    IMAGE_RATIO_TO_CROP = 0.8 # after the computation of the image representing
                              # the pixelwise difference norm, a cropped version
                              # of it is computed, in which only the central part is kept
    MEDIAN_THRESHOLD = np.multiply(441.672956,0.25) # threshold on the median, used to
                                                 # discard wrong matches if the
                                                 # cropped pixelwise difference norm
                                                 # has a median greater than this.
                                                 # 441.672956 is the maximum
                                                 # possible pixelwise difference norm
    RANDOM_MATCH_RATIO = 0.1 # fraction of matches randomly plotted at each
                             # print_random_matches iteration, to make the
                             # match connections easier to see
    ITERATIONS = 5 # number of iterations of print_random_matches
    MAX_DISCARDED_CONTINUOUSLY = 50 # max number of homographies discarded in a row before stopping
                                    # the algorithm
    
    ## Set the size of the figure to show
    matplotlib.rcParams["figure.figsize"]=(15,12)
    
    ## Load images 
    template_image = cv2.imread(template, cv2.IMREAD_COLOR) # template image
    test_image = cv2.imread(test, cv2.IMREAD_COLOR)  # test image
    
    ## Show the loaded images
    plt.imshow(cv2.cvtColor(template_image, cv2.COLOR_BGR2RGB)), plt.title('template'),plt.show()
    plt.imshow(cv2.cvtColor(test_image, cv2.COLOR_BGR2RGB)), plt.title('image'),plt.show()
    
  #%%  Initiate the SIFT detector

    ## Create the SIFT detector
    sift_detector = cv2.xfeatures2d.SIFT_create()
    
    ## Find the keypoints and descriptors with sift_detector from the template and test image
    template_keypoints,template_descriptors = sift_detector.detectAndCompute(template_image, None)
    test_keypoints,test_descriptors  = sift_detector.detectAndCompute(test_image, None)
    
    ## Show the number of keypoints found in the template and test image
    print('found ' + str(len(template_keypoints)) + ' keypoints in the template')
    print('found ' + str(len(test_keypoints)) + ' keypoints in the test image')
    
    # kp list of keypoints such that:
    #   kp[0].pt = location
    #   kp[0].angle = orientation
    #   kp[0].octave = scale information
    
    #%% Initialize a flann_matcher object to match keypoints with their nearest neighbours.
    
    # = From flann_matcher documentation ==================================================
    # 	flann_matcher_INDEX_LINEAR = 0,
    # 	flann_matcher_INDEX_KDTREE = 1,
    # 	flann_matcher_INDEX_KMEANS = 2,
    # 	flann_matcher_INDEX_COMPOSITE = 3,
    # 	flann_matcher_INDEX_KDTREE_SINGLE = 4,
    # 	flann_matcher_INDEX_HIERARCHICAL = 5,
    # 	flann_matcher_INDEX_LSH = 6,
    # 	flann_matcher_INDEX_KDTREE_CUDA = 7, // available if compiled with CUDA
    # 	flann_matcher_INDEX_SAVED = 254,
    # 	flann_matcher_INDEX_AUTOTUNED = 255,
    # =============================================================================
    
    ## Specify a constant representing the type of algorithm used by flann_matcher
    flann_matcher_INDEX_KDTREE = 1 # algorithm used is KDTREE
    
    ## Specify flann_matcher matcher creator parameters
    index_params = dict(algorithm=flann_matcher_INDEX_KDTREE, trees=5) # 5 trees used in the KDTREE search
    search_params = dict(checks=50) # number of times the trees in the index should be recursively traversed
    
    ## Create FLANN matcher
    flann_matcher = cv2.FlannBasedMatcher(index_params, search_params)
    
    #%% Find correspondences by matching the image features with the template features (this is not the same as matching template_descriptors with test_descriptors)
    
    ## Invoke the flann_matcher method to obtain k outputs: for each feature in the test image it returns the k closest features in the template image
    matches =  flann_matcher.knnMatch(test_descriptors,template_descriptors,k=2) # there is no threshold, the k closest points are returned
    
    ## Show the number of features in the test image that have at least one match in the template image
    print('Found ' + str(len(matches)) + ' putative matches')
    
    ## Extract self similar and fingerprint list
    self_similar_list, fingerprint_list = self_similar_and_fingerprint_matches_extraction(template_descriptors)
    
    ## Plot self-similar feature matches
    self_matches_plot(template_image, template_keypoints, self_similar_list, 'Self-similar matches')
    
    ## Create a reorganized self similar template matches list
    ## and create an always true mask
    self_similar_template_matches = [[item] for items in self_similar_list for item in items]
    self_similar_template_mask = [[1] for i in iter(range(len(self_similar_template_matches)))]
    
    ## Plot randomly a subset of self-similar feature matches in the template
    print_random_matches(template_image, template_keypoints, template_image, 
                             template_keypoints, self_similar_template_matches, 
                             self_similar_template_mask, 
                             RANDOM_MATCH_RATIO, ITERATIONS)
    
    #%% Store all the good matches as per Lowe's ratio test
    # Lowe's ratio test removes the ambiguous and false matches:
    #   It keeps only matches where the distance with the closest match is lower 
    #   than LOWE_THRESHOLD * the distance with the second best match
    good_matches = []
    good_rescued_self_similar_mask = [] # mask of rescued self similar matches
                                             # with same length of good_matches
    
    ## Need to keep only good matches, so create a mask, each row corresponds to a match
    matches_mask = [[0,0] for i in iter(range(len(matches)))]
    
    ## Create also a mask to keep rescued self-similar feature matches
    rescued_self_similar_mask = [[0,0] for i in iter(range(len(matches)))]
    
    number_rescued_self_similar=0
    ## Apply Lowe's test for each match, modifying the mask accordingly
    for i,(m,n) in enumerate(matches):
        if m.distance < LOWE_THRESHOLD*n.distance:
            good_matches.append(m) # match appended to the list of good matches 
            good_rescued_self_similar_mask.append(0)
            matches_mask[i]=[1,0] # mask modified to consider the i-th match as good
        else:
            if len(self_similar_list[m.trainIdx])!=0:
                    second_nearest_keypoint = template_keypoints[n.trainIdx]
                    self_similar_keypoints = [template_keypoints[match.trainIdx] for match in self_similar_list[m.trainIdx]]
                    if second_nearest_keypoint in self_similar_keypoints:
                        good_matches.append(m) # match appended to the list of good matches
                        good_rescued_self_similar_mask.append(1)
                        rescued_self_similar_mask[i]=[1,0]  # mask modified to consider
                                                            # the i-th match as self-similar
                        matches_mask[i]=[1,0] # mask modified to consider the i-th match as good
                        number_rescued_self_similar+=1
    good_rescued_self_similar_mask = np.asarray(good_rescued_self_similar_mask)
    
    ## Compute a flat array of rescued self similar matches
    flat_rescued_self_similar_mask = np.zeros(len(rescued_self_similar_mask))
    for i,items in enumerate(rescued_self_similar_mask):
        if items[0]==1:
            flat_rescued_self_similar_mask[i]=1
    
    ## Show the number of good matches found
    print('Found ' + str(len(good_matches)) + 
          ' matches validated by the distance ratio test, ' + 
          str(number_rescued_self_similar) + ' self similar')
    ## Specify parameters for the function that shows good matches graphically
    draw_params = dict(matchColor = (0,255,0), # draw matches in green
                       singlePointColor = (0,0,255), # draw lone points in red
                       matchesMask = matches_mask, # draw only good matches
                       flags = 0)
    
    ## Good matches represented on another image
    matches_image = cv2.drawMatchesKnn(test_image, test_keypoints, template_image, 
                                       template_keypoints, matches, None, **draw_params)
    
    ## Plot the good matches
    plt.imshow(cv2.cvtColor(matches_image, cv2.COLOR_BGR2RGB))
    plt.title('All matches after ratio test'), plt.show()
    
    ## Plot rescued self-similar feature matches on the images
    ## Specify parameters
    draw_params = dict(matchColor = (0,255,0), # draw matches in green
                       singlePointColor = (0,0,255), # draw lone points in red
                       matchesMask = rescued_self_similar_mask, # draw only rescued
                                                                # self-similar matches
                       flags = 0)
    
    ## Rescued self-similar feature matches represented on another image
    self_similar_matches_image = cv2.drawMatchesKnn(test_image, test_keypoints, 
                                                    template_image, template_keypoints,
                                                    matches, None, **draw_params)
    
    ## Plot the self-similar feature matches
    plt.imshow(cv2.cvtColor(self_similar_matches_image, cv2.COLOR_BGR2RGB))
    plt.title('Rescued self-similar feature matches'), plt.show()
    
    ## Plot randomly a subset of self-similar feature matches between template
    ## and test image
    print_random_matches(test_image, test_keypoints, template_image, 
                             template_keypoints, matches, rescued_self_similar_mask, 
                             RANDOM_MATCH_RATIO, ITERATIONS)
    
    #%% Cluster good matches by fitting homographies through RANSAC
    
 
    
    ## Initialize the discarded homographies counters (see print_discarded for more info)
    discarded_homographies = [0,0,0,0,0,0]
    
    ## Initialize the areas of the found homographies
    areas = []
    
    ## Initialize self similar per image and inliers per image counters
    self_similar_per_image = []
    inliers_per_image = []
    
    ## Initialize the buffer of temporary removed matches
    #temporary_removed_matches = list()
    
    ## Initialize the test image used to draw projected squares
    test_image_squares = test_image.copy()
    
    ## Create a polygon using test image vertices
    img_polygon = Polygon([(0,0), (0,test_image.shape[0]), (test_image.shape[1],test_image.shape[0]), (test_image.shape[1],0)])
    
    ## Create debug file
    discarded_file = open("debug.txt","w")
    
    ## Counter of continuously discarded homographies
    discarded_cont_count = []
    discarded_cont_count.append(0)
    
    ## Continue to look for other homographies
    end = False
    while not end:
        
        #Shuffle matches and related masks in order to randomize ransac
        good_matches, good_rescued_self_similar_mask = shuffle_matches(good_matches, good_rescued_self_similar_mask)
        
        ## If a large number of homographies have been discarded in a row, it is likely that there aren't
        ## other good homographies, and the algorithm ends
        if discarded_cont_count[0] < MAX_DISCARDED_CONTINUOUSLY:
            ## If the number of remaining matches is low, it is likely that there aren't
            ## other good homographies, and the algorithm ends
            if len(good_matches) >= MIN_MATCH_COUNT:
                ## Retrieve coordinates of features keypoints in its image
                ## (the feature m.queryIdx inside test_image has been matched
                ##  with feature m.trainIdx inside template_image)
                src_pts = np.float32([template_keypoints[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
                dst_pts = np.float32([test_keypoints[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
                
                ## Apply the RANSAC algorithm to fit a homography: H is the final homography,
                ## inliers_mask represents the inliers
                H, inliers_mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
                
                ## If no available homographies exist, the algorithm ends
                if H is not None:      
                    ## Create a list representing all the inliers of the
                    ## retrieved homography
                    matches_mask = inliers_mask.ravel().tolist()
                    
                    ## Retrieve coordinates of the inliers in the test image, 
                    ## and their index wrt the actual good matches list
                    dst_inliers = [dst_pts[i] for i in range(len(dst_pts)) if matches_mask[i]]
                    index_inliers = np.nonzero(matches_mask)[0]
                    
                    ## Project the vertices of the abstract rectangle around the
                    ## template image in the test one, using the found homography,
                    ## in order to localize the template in the scene
                    h, w = template_image.shape[0:2]
                    src_vrtx = np.float32([[0, 0], 
                                           [0, h - 1], 
                                           [w - 1, h - 1], 
                                           [w - 1, 0]]).reshape(-1, 1, 2)
                    dst_vrtx = cv2.perspectiveTransform(src_vrtx, H)  
                    
                    ## If the homography is degenerate, it is discarded
                    if is_rank_full(H, discarded_cont_count, discarded_homographies, discarded_file):
                        ## If the retrieved homography has been fitted using few matches,
                        ## it is likely to perform poorly and there aren't other good homographies, so the algorithm ends
                        if np.count_nonzero(matches_mask) >= MIN_MATCH_CURRENT:
                            ## Create a polygon using the projected vertices
                            polygon = Polygon([(dst_vrtx[0][0][0], dst_vrtx[0][0][1]), 
                                               (dst_vrtx[1][0][0], dst_vrtx[1][0][1]), 
                                               (dst_vrtx[2][0][0], dst_vrtx[2][0][1]), 
                                               (dst_vrtx[3][0][0], dst_vrtx[3][0][1])])
                                    
                            ## Homography kept only if the projected polygon
                            ## is mostly inside the image
                            if (out_points_ratio(dst_inliers, 
                                                 polygon, 
                                                 discarded_file, 
                                                 IN_POLYGON_THRESHOLD, 
                                                 discarded_homographies,
                                                 discarded_cont_count) and out_area_ratio(img_polygon,
                                                                                          polygon,
                                                                                          discarded_file,
                                                                                          OUT_OF_IMAGE_THRESHOLD,
                                                                                          discarded_homographies,
                                                                                          discarded_cont_count)):
                                
                                ## Create a mask over the left good matches of the 
                                ## ones that are inliers
                                inliers_mask = np.zeros(len(good_matches))
                                for i in range(len(good_matches)):
                                    if i in index_inliers:
                                        inliers_mask[i] = 1
                                
                                ## Retrieve matches that are inliers, and their index
                                ## wrt the actual good matches list
                                inliers_matches = [good_matches[i] for i in range(len(good_matches)) if inliers_mask[i]]
                                index_inliers_matches = [i for i in range(len(good_matches)) if inliers_mask[i]]
                                
                                ## Retrieve coordinates of features keypoints in its
                                ## image, for ones that are inliers
                                new_src_pts = np.float32([template_keypoints[m.trainIdx].pt for m in inliers_matches]).reshape(-1, 1, 2)
                                new_dst_pts = np.float32([test_keypoints[m.queryIdx].pt for m in inliers_matches]).reshape(-1, 1, 2)
                                
                                ## Apply the LMEDS algorithm to fit a new homography,
                                ## taking into account all previous inliers
                                H, inliers_mask = cv2.findHomography(new_src_pts,new_dst_pts,cv2.LMEDS, 10.0)
                                
                                ## If no available homographies exist, the algorithm ends
                                if H is not None:
                                    ## Create a list representing all the inliers of the retrieved homography
                                    matches_mask = inliers_mask.ravel().tolist()
                                    
                                    ## Retrieve coordinates of the inliers in the test image, and their index wrt the actual good matches list
                                    dst_inliers = [new_dst_pts[i] for i in range(len(new_dst_pts)) if inliers_mask[i]]
                                    index_inliers = [index for i,index in enumerate(index_inliers_matches) if inliers_mask[i]]
                                    
                                    ## Project the vertices of the abstract 
                                    ## rectangle around the template image
                                    ## in the test one, using the found homography,
                                    ## in order to localize the template in the scene
                                    dst_vrtx = cv2.perspectiveTransform(src_vrtx, H)
                                    
                                    ## If the homography is degenerate, it is discarded
                                    if is_rank_full(H, discarded_cont_count, discarded_homographies, discarded_file):
                                        
                                        ## Create a polygon using the projected vertices
                                        polygon = Polygon([(dst_vrtx[0][0][0], dst_vrtx[0][0][1]),
                                                           (dst_vrtx[1][0][0], dst_vrtx[1][0][1]),
                                                           (dst_vrtx[2][0][0], dst_vrtx[2][0][1]),
                                                           (dst_vrtx[3][0][0], dst_vrtx[3][0][1])])
                                       
                                        ## Apply the inverse of the found homography to the scene image
                                        ## in order to rectify the object in the polygon and extract the 
                                        ## bounded image region from the rectified one containing the template instance
                                        H_inv = inv(H)
                                        rect_test_image = cv2.warpPerspective(test_image,H_inv,(w,h))
                                        
                                        ## Equalize both template and rectified image
                                        (equalized_template_image,
                                         equalized_rect_test_image) = equalize_template_and_rectified_scene(template_image,
                                                                                                            rect_test_image)
                                        
                                        ## Compute the difference between equalized
                                        ## template and equalized rectified image
                                        abs_diff_image = cv2.absdiff(equalized_template_image,
                                                                     equalized_rect_test_image)
                                        
                                        ## Compute the image representing the
                                        ## pixelwise difference norm, and the
                                        ## version of it in which only the central
                                        ## part is kept
                                        (diff_norm_image, 
                                         diff_norm_image_central) = difference_norm_image_computation(abs_diff_image, 
                                                                                                      IMAGE_RATIO_TO_CROP)
                                        
                                        ## Check that the pixelwise difference norm
                                        ## median of a central region of the
                                        ## difference image is under a certain
                                        ## threshold
                                        if pixelwise_difference_norm_check(diff_norm_image_central, 
                                                                           MEDIAN_THRESHOLD, 
                                                                           discarded_file,
                                                                           discarded_homographies,
                                                                           discarded_cont_count):
                                        
                                        ## Area confidence test
                                        #if validate_area(ALPHA, areas, polygon.area, discarded_file, discarded_homographies): 
                                            
                                            print('NEW HOMOGRAPHY FOUND!')
                                            
                                            ##print('Number of inliers out of the homography:' +  str(len(dst_inliers) - (out_points_ratio(dst_inliers, polygon)*len(dst_inliers))))
                                            ##print('Fraction of inliers out of the homography:' +  str((len(dst_inliers) - (out_points_ratio(dst_inliers, polygon)*len(dst_inliers)))/len(dst_inliers)))
                                        
                                            areas.append(polygon.area) 
                                            
                                            ## Draw the projected polygon in the test image, in order to visualize the found template in the test image
                                            polygons_image = cv2.polylines(test_image_squares, [np.int32(dst_vrtx)], True, [255,255,255], 3, cv2.LINE_AA)
                                            
                                            ## Specify parameters for the function that shows clustered matches, i.e. all the inliers for the selected homography
                                            draw_params = dict(matchColor=(0, 255, 0),  # draw matches in green
                                                               singlePointColor=None,
                                                               matchesMask=matches_mask,  # draw only inliers
                                                               flags=2)
                                            
                                            ## Draw clustered matches
                                            matches_image = cv2.drawMatches(polygons_image, test_keypoints, template_image, template_keypoints, inliers_matches, None, **draw_params)
                                            
                                            ## Plot the clustered matches
                                            plt.imshow(cv2.cvtColor(matches_image, cv2.COLOR_BGR2RGB)), plt.title('Clustered matches'), plt.show()
                                            
                                            ## Put back, inside the good matches list, points temporarily removed
                                            #good_matches.extend(temporary_removed_matches)
                                            #temporary_removed_matches.clear()
                                            
                                            ## Remove all matches in the polygon
                                            keep_mask = 1 - remove_mask(test_keypoints, good_matches, polygon)
                                            good_matches = [good_matches[i] for i in range(len(keep_mask)) if keep_mask[i]]
                                            
                                            ## Keep the length of this mask compatible
                                            ## with good_matches length
                                            new_good_rescued_self_similar_mask = [good_rescued_self_similar_mask[i] for i in
                                                                              range(len(keep_mask)) if keep_mask[i]]
                                            new_good_rescued_self_similar_mask = np.asarray(new_good_rescued_self_similar_mask)
                                            
                                            ## Apply the homography to all test_keypoints in order to plot them
                                            object_test_keypoints = project_keypoints(test_keypoints, H_inv)
                                            
                                            ## Specify parameters for the function that shows clustered matches, i.e. all the inliers for the selected homography
                                            draw_params = dict(matchColor=(0, 255, 0),  # draw matches in green
                                                               singlePointColor=None,
                                                               matchesMask=matches_mask,  # draw only inliers
                                                               flags=2)
                                            
                                            ## Draw clustered rectified matches
                                            matches_image = cv2.drawMatches(rect_test_image,
                                                                            object_test_keypoints,
                                                                            template_image,
                                                                            template_keypoints,
                                                                            inliers_matches,
                                                                            None,
                                                                            **draw_params)
                                            
                                            ## Show the rectified matches and image
                                            plt.imshow(cv2.cvtColor(matches_image, cv2.COLOR_BGR2RGB)), 
                                            plt.title('Rectified object matches'), plt.show()
                                            rect_stacked_image = np.hstack((rect_test_image, template_image))
                                            plt.imshow(cv2.cvtColor(rect_stacked_image, cv2.COLOR_BGR2RGB)),
                                            plt.title('Rectified object image'), plt.show()
                                            
                                            ## Plot the equalized template and
                                            ## rectified image
                                            equalized_rect_stacked_image = np.hstack((equalized_rect_test_image,
                                                                                      equalized_template_image))
                                            plt.imshow(cv2.cvtColor(equalized_rect_stacked_image, cv2.COLOR_BGR2RGB)), 
                                            plt.title('Equalized template and object image'), plt.show()
                                            
                                            ## Plot the difference between equalized
                                            ## template and equalized rectified image and its histogram
                                            difference_plot_and_histogram(abs_diff_image)
                                            
                                            ## Plot the images of the pixelwise
                                            ## difference norm, and the histogram
                                            ## of the one representing the
                                            ## central part, highlighting the median
                                            pixelwise_difference_plot_and_histogram(diff_norm_image,
                                                                                    diff_norm_image_central,
                                                                                    MEDIAN_THRESHOLD)
            
                                            ## Show the number of discarded homographies until now
                                            print_discarded(discarded_homographies)
                                            
                                            ## Show the number of good matches left
                                            print('There remain ' + str(len(good_matches)) + ' good matches')
                                            
                                            ## Show the number of good homographies until now
                                            print("Found " + str(len(areas)) + " homographies until now")
                                            discarded_file.write("HOMOGRAPHY FOUND #"+str(len(areas))+"\n\n")
                                            
                                            ## Show the number of rescued self-similar
                                            ## matches effectively used to find a good
                                            ## homography until now
                                            rescued_self_similar_used(flat_rescued_self_similar_mask,
                                                                      good_rescued_self_similar_mask,
                                                                      new_good_rescued_self_similar_mask,
                                                                      1-keep_mask,
                                                                      self_similar_per_image,
                                                                      matches_mask,
                                                                      index_inliers_matches,
                                                                      inliers_per_image)
                                            good_rescued_self_similar_mask = new_good_rescued_self_similar_mask
                                            
                                            discarded_cont_count[0] = 0
                                           
                                else:
                                    print("Not possible to find another homography")
                                    end = True
                        else:
                            print("Not enough matches are found in the last homography - {}/{}".format(np.count_nonzero(matches_mask), MIN_MATCH_CURRENT))
                            end = True
                else:
                    print("Not possible to find another homography")
                    end = True
            else:
                print("Not enough matches are found - {}/{}".format(len(good_matches), MIN_MATCH_COUNT))
                end = True
        else:
           print("Discarded "+str(discarded_cont_count[0])+" homography in a row. Not able to find other homography")
           end = True 
        
    ## Show the final image, in which all templates found are drawn
    if len(areas)!=0: plt.imshow(cv2.cvtColor(polygons_image, cv2.COLOR_BGR2RGB)), plt.title('final image'),plt.show()
    
    ## Show the number of discarded homographies until now
    print_discarded(discarded_homographies)
    
    ## Show the final number of good homographies found
    print("Found " + str(len(areas)) + " homographies")
    
    ## Show self similar statistics
    print_self_similar_stats(inliers_per_image, self_similar_per_image, number_rescued_self_similar, good_rescued_self_similar_mask)
    
    ## Close debug file
    discarded_file.close()
   
    return (int(sum(self_similar_per_image)), number_rescued_self_similar - int(sum(self_similar_per_image)), len(good_matches))
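
The block above is the tail of a multi-instance detector: it repeatedly estimates a homography, validates it with a pixelwise-difference test, draws the projected template outline, and removes the matches that fall inside the accepted polygon before searching again. A minimal, self-contained sketch of that find-and-remove cycle (function and parameter names here are illustrative, not the original helpers, and it assumes the matches came from knnMatch(template_desc, test_desc), so queryIdx indexes the template keypoints; the original code also applies appearance checks that this sketch omits):

import cv2
import numpy as np

def find_template_instances(template_kp, test_kp, good_matches, template_shape,
                            min_match_count=10, max_instances=5):
    # Repeatedly fit a homography with RANSAC, keep its projected polygon,
    # then drop the matches inside it before searching for the next instance.
    h, w = template_shape[:2]
    corners = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    polygons = []
    matches = list(good_matches)
    while len(matches) >= min_match_count and len(polygons) < max_instances:
        src = np.float32([template_kp[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
        dst = np.float32([test_kp[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
        H, mask = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)
        if H is None or int(mask.sum()) < min_match_count:
            break
        polygon = cv2.perspectiveTransform(corners, H)
        polygons.append(polygon)
        # keep only the matches whose test-image keypoint lies outside the accepted polygon
        matches = [m for m in matches
                   if cv2.pointPolygonTest(polygon, test_kp[m.trainIdx].pt, False) < 0]
    return polygons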
Example #17
0
res = None
sift = cv2.xfeatures2d.SIFT_create()  # create a SIFT object; it computes SIFT keypoints and descriptors
while True:  # capture frames repeatedly
    # SIFT detection -> description
    KeyPoint1, des1 = sift.detectAndCompute(
        image1, None)  # compute keypoints and descriptors for image1 in one call
    KeyPoint2, des2 = sift.detectAndCompute(
        image2, None)  # compute keypoints and descriptors for image2 in one call
    # FLANN matching
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE,
                        trees=5)  # index parameters used for SIFT/SURF descriptors
    search_params = dict(checks=50)  # number of checks used during matching

    Flann = cv2.FlannBasedMatcher(
        index_params, search_params)  # build the FLANN-based matcher from the dictionaries above
    matches = Flann.knnMatch(des1, des2, k=2)  # return the k best matches (k=2) for each descriptor

    good = []
    for m, n in matches:  # for each pair, keep the best match only if it is closer than
        if m.distance < 0.7 * n.distance:  # 0.7 times the second-best match (Lowe's ratio test)
            good.append(m)
    res = cv2.drawMatches(image1,
                          KeyPoint1,
                          image2,
                          KeyPoint2,
                          good,
                          res,
                          flags=2)  # connect matching keypoints between the two images with lines

    # display the image
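
The pattern in Example #17 (SIFT detection, FLANN KD-tree matching, Lowe's ratio test) recurs in most of the snippets below; a compact, hedged sketch of it as a reusable helper (the 0.7 ratio and the xfeatures2d constructor are assumptions tied to an OpenCV 3.x contrib build):

import cv2

def sift_flann_matches(img1, img2, ratio=0.7):
    # detect keypoints and compute descriptors on both grayscale images
    sift = cv2.xfeatures2d.SIFT_create()  # cv2.SIFT_create() on OpenCV >= 4.4
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # a KD-tree index (algorithm=0) is the usual choice for float descriptors
    flann = cv2.FlannBasedMatcher(dict(algorithm=0, trees=5), dict(checks=50))
    matches = flann.knnMatch(des1, des2, k=2)
    # Lowe's ratio test: keep a match only if it is clearly better than the runner-up
    good = [m for m, n in matches if m.distance < ratio * n.distance]
    return kp1, kp2, good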
Example #18
0
    def match(self, image, category):
        self.c.execute("SELECT * FROM images WHERE label=?", (category, ))
        data = self.c.fetchall()

        # Converting the image to gray scale
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # Initiate SIFT and SURF detectors
        sift = cv2.xfeatures2d.SIFT_create()
        surf = cv2.xfeatures2d.SURF_create()

        # find the keypoints and descriptors with SIFT or SURF or ORB
        kp1, des1 = surf.detectAndCompute(image, None)

        # FLANN parameters
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)  # or pass empty dictionary
        flann = cv2.FlannBasedMatcher(index_params, search_params)

        best_ratio, i = 0, 1
        for row in data:
            matches = flann.knnMatch(des1, row[1], k=2)

            # Apply ratio test
            good = []
            for m, n in matches:
                if m.distance < 0.7 * n.distance:
                    good.append([m])

            c_ratio = len(good) / len(row[1])
            print("[{}] {0:.2f}".format(row[0].split(os.path.sep)[-1],
                                        c_ratio))

            if c_ratio > best_ratio:
                best_ratio = c_ratio
                imagePath = row[0]
                # matches_ = matches

            i += 1

        print("2-[IMAGE MATCHING]       | ratio: {0:.2f}".format(best_ratio))

        if best_ratio >= 0.5:

            # ref = cv2.imread(imagePath,0)
            # dim = (image.shape[1], image.shape[0])
            # ref = cv2.resize(ref, dim, interpolation = cv2.INTER_AREA)

            # kp2, des2 = surf.detectAndCompute(ref, None)
            # matches = flann.knnMatch(des1,des2,k=2)

            # Need to draw only good matches, so create a mask
            # matchesMask = [[0,0] for i in range(len(matches))]
            # for i,(m,n) in enumerate(matches):
            #	if m.distance < 0.7*n.distance:
            #		matchesMask[i]=[1,0]

            # draw_params = dict(matchColor = (0,255,0),
            #				   singlePointColor = (255,0,0),
            #				   matchesMask = matchesMask,
            #				   flags = 0)

            # output = cv2.drawMatchesKnn(image, kp1, ref, kp2, matches, None, **draw_params)
            # cv2.imwrite("output.jpg", output)

            return imagePath
        else:
            return ""
Example #19
0
def main():
    import sys, getopt
    opts, args = getopt.getopt(sys.argv[1:], '', ['feature='])
    opts = dict(opts)
    feature_name = opts.get('--feature', 'brisk')
    print(args)
    try:
        fn1, fn2 = args
    except:
        fn1 = 'imgs/test1_clear.jpg'
        fn2 = 'imgs/test1_affine.jpg'

    detector, matcher = init_feature(feature_name)

    print('using', feature_name)

    img1 = cv.imread(fn1, cv.IMREAD_GRAYSCALE)
    img2 = cv.imread(fn2, cv.IMREAD_GRAYSCALE)

    if img1 is None:
        print('Failed to load fn1:', fn1)
        sys.exit(1)

    if img2 is None:
        print('Failed to load fn2:', fn2)
        sys.exit(1)

    if detector is None:
        print('unknown feature:', feature_name)
        sys.exit(1)

    kp1, desc1 = detector.detectAndCompute(img1, None)
    kp2, desc2 = detector.detectAndCompute(img2, None)

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    flann = cv.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(desc1, desc2, k=2)

    # store all the good matches as per Lowe's ratio test.
    good_matches = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good_matches.append(m)

    MIN_MATCH_COUNT = 3

    if len(good_matches) > MIN_MATCH_COUNT:

        src_pts = np.float32([kp1[m.queryIdx].pt
                              for m in good_matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt
                              for m in good_matches]).reshape(-1, 1, 2)

        M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()

        h, w = img1.shape[:2]
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)
        # pts = np.array([pts])
        if M is not None:
            dst = cv.perspectiveTransform(pts, M)
            # dst += (w, 0)  # adding offset

            # Draw bounding box in Red
            img2 = cv.polylines(img2, [np.int32(dst)], True, (0, 0, 255), 3,
                                cv.LINE_AA)
        else:
            print("M was none")
    else:
        print("Not enough matches are found - %d/%d" %
              (len(good_matches), MIN_MATCH_COUNT))
        matchesMask = None

    draw_params = dict(
        matchColor=(0, 255, 0),  # draw matches in green color
        singlePointColor=None,
        matchesMask=matchesMask,  # draw only inliers
        flags=2)

    img3 = cv.drawMatches(img1, kp1, img2, kp2, good_matches, None,
                          **draw_params)

    cv.imshow("result", img3)

    # def match_and_draw(win):
    #     print('matching...')
    #     raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
    #     p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
    #     if len(p1) >= 4:
    #         H, status = cv.findHomography(p1, p2, cv.RANSAC, 5.0)
    #         print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
    #     else:
    #         H, status = None, None
    #         print('%d matches found, not enough for homography estimation' % len(p1))
    #
    #     _vis = explore_match(win, img1, img2, kp_pairs, status, H)
    #
    # match_and_draw('find_obj')

    cv.waitKey()

    print('Done')
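
Example #19 depends on an init_feature helper that is not shown; a plausible stand-in, modeled on the OpenCV find_obj sample it resembles (the supported names and the BFMatcher choice are assumptions):

import cv2 as cv

def init_feature(name):
    # return (detector, matcher) for a feature name, or (None, None) if unknown
    if name == 'sift':
        detector, norm = cv.xfeatures2d.SIFT_create(), cv.NORM_L2
    elif name == 'brisk':
        detector, norm = cv.BRISK_create(), cv.NORM_HAMMING
    elif name == 'orb':
        detector, norm = cv.ORB_create(400), cv.NORM_HAMMING
    else:
        return None, None
    return detector, cv.BFMatcher(norm)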
Example #20
0
# get data, setup output dir
res_dir = 'res/lift/%s' % args.trials
if not os.path.exists(res_dir):
    os.makedirs(res_dir)

# write human readable logs
f = open(os.path.join(res_dir, 'log.txt'), 'w')
f.write('lift\n')
f.write('thresh_overlap: %d\n' % cst.THRESH_OVERLAP)
f.write('thresh_desc: %d\n' % cst.THRESH_DESC)

norm = 'L2'
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
matcher = cv2.FlannBasedMatcher(index_params, search_params)

i_des_dir = os.path.join('res/lift/', args.lift_data_id, 'des_no_aug')
v_des_dir = os.path.join('res/lift/', args.lift_data_id, 'des_aug')
lift_dir = i_des_dir

global_start_time = time.time()
H = np.eye(3)
for scene_name in cst.SCENE_LIST:

    duration = time.time() - global_start_time
    print('*** %s *** %d:%02d' % (scene_name, duration / 60, duration % 60))
    f.write('*** %s *** %d:%02d\n' %
            (scene_name, duration / 60, duration % 60))

    img_dir = os.path.join(cst.DATA_DIR, scene_name, 'test/image_color')
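
Example #20 only shows the setup: a log file, a FLANN matcher, and directories of precomputed LIFT descriptors. A hedged sketch of how such precomputed descriptor files might be loaded and matched (the .npy format and the 0.8 ratio are assumptions, not part of the original pipeline):

import os
import numpy as np

def match_precomputed(matcher, des_dir, name1, name2, ratio=0.8):
    # load two precomputed descriptor files and return ratio-test matches
    des1 = np.load(os.path.join(des_dir, name1)).astype(np.float32)
    des2 = np.load(os.path.join(des_dir, name2)).astype(np.float32)
    matches = matcher.knnMatch(des1, des2, k=2)
    return [m for m, n in matches if m.distance < ratio * n.distance]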
Example #21
0
def feature_matching(image,
                     template,
                     obj_kp,
                     obj_desc,
                     feature_detector: cv2.Feature2D,
                     mask=None,
                     match_method='flann'):
    ''' Perform feature matching between an object template and an input image.

        @param image: the input image to search
        @param template: the object template image (used to locate its centre point)
        @param obj_kp: the object keypoints
        @param obj_desc: the object feature descriptors
        @param feature_detector (Feature2D): feature detector of choice
        @param mask (default None): mask of the image region to search through
        @param match_method (default 'flann'): 'flann' or 'brute-force'

        @returns (x_com, y_com, xform, image_match): the matched centre point of the
                template in the image, the fitted affine transform, and an image with
                the matches drawn. For use in a Kalman filter you will need to add this
                translation to the originally detected object (potentially).
    '''
    # gray scale the image
    image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # begin feature detection
    img_kp, img_desc = feature_detector.detectAndCompute(image_gray, mask)

    # begin feature matching
    if match_method == 'flann':
        # default params from opencv-pytutorials
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)

        # create flann matcher
        matcher = cv2.FlannBasedMatcher(index_params, search_params)

    # if

    elif match_method == 'brute-force':
        matcher = cv2.BFMatcher_create()

    # elif

    else:
        raise NotImplementedError(
            f"'{match_method}' not an implemented matching method")

    # else

    matches = matcher.knnMatch(obj_desc, img_desc, k=2)
    good_matches = [m for m, n in matches if m.distance < 0.8 * n.distance
                    ]  # filter out for good matches

    # grab keypoint matches
    keypts_obj = []
    keypts_img = []
    for idx in range(len(good_matches)):
        keypt_obj, keypt_img = get_keypoint_coord_from_match(
            good_matches, obj_kp, img_kp, idx)
        keypts_obj.append(keypt_obj)
        keypts_img.append(keypt_img)

    # for
    keypts_obj = np.array(keypts_obj)
    keypts_img = np.array(keypts_img)

    # get affine transform
    ransac_num_samples = 6
    if len(good_matches) >= ransac_num_samples:
        xform, _, ransac_matches = ransac(good_matches,
                                          obj_kp,
                                          img_kp,
                                          num_samples=ransac_num_samples)
        pt_temp_com = np.array([template.shape[1], template.shape[0]]).reshape(
            1, 1, 2) / 2
        pt_img_com = cv2.transform(pt_temp_com, xform).squeeze()
        x_com = pt_img_com[0]
        y_com = pt_img_com[1]

        image_match = cv2.drawMatches(template, obj_kp, image, img_kp,
                                      ransac_matches, None)

    # if

    elif len(good_matches) > 0:
        xform = get_affine_transform(keypts_obj, keypts_img)
        pt_temp_com = np.array([template.shape[1], template.shape[0]]).reshape(
            1, 1, 2) / 2
        pt_img_com = cv2.transform(pt_temp_com, xform).squeeze()
        x_com = pt_img_com[0]
        y_com = pt_img_com[1]

        image_match = cv2.drawMatches(template, obj_kp, image, img_kp,
                                      good_matches, None)

    # elif

    else:
        x_com, y_com = (None, None)
        xform = None
        image_match = None

    # else

    # if

    return x_com, y_com, xform, image_match
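
A hedged usage sketch for Example #21's feature_matching (the file names, the SIFT constructor, and the output path are assumptions; the ransac and get_keypoint_coord_from_match helpers must come from the same module as feature_matching):

import cv2

template = cv2.imread('template.png')  # assumed file name
frame = cv2.imread('frame.png')        # assumed file name

detector = cv2.SIFT_create()  # cv2.xfeatures2d.SIFT_create() on older contrib builds
obj_kp, obj_desc = detector.detectAndCompute(cv2.cvtColor(template, cv2.COLOR_BGR2GRAY), None)

x_com, y_com, xform, image_match = feature_matching(
    frame, template, obj_kp, obj_desc, detector, match_method='flann')
if image_match is not None:
    cv2.imwrite('matches.png', image_match)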
Example #22
0
            cir_r = int(circle[2])
        dst = frame[(cir_y + y - r - cir_r):(cir_y + y - r + cir_r),
                    (cir_x + x - r - cir_r):(cir_x + x - r + cir_r)]
        #dst = frame[(y-r):(y+r),(x-r):(x+r)]  # crop the circle region
    print('circle ok')
    #cv2.imshow("2",dst)
    cv2.imwrite(path_circle + str(num) + '.jpg', dst)

    # run the cropped patch through the model
    if num == 0:
        t1 = time.time()  # start timing
    with open('zuixinfl', 'rb') as fr:
        vocabulary = pickle.load(fr)
    extract = cv2.xfeatures2d.SIFT_create()
    flann_params = dict(algorithm=1, trees=5)
    flann = cv2.FlannBasedMatcher(flann_params, {})
    extract_bow = cv2.BOWImgDescriptorExtractor(extract, flann)
    extract_bow.setVocabulary(vocabulary)

    f = extract_bow.compute(dst, extract.detect(dst))
    x1 = np.array(f)

    with open('svm2.pickle', 'rb') as fr:
        new_svm = pickle.load(fr)
        print(new_svm.predict(x1))  # print the prediction result

        # save the result into one of two folders depending on the predicted class
        if new_svm.predict(x1)[0] == 1:
            cv2.imwrite(path_youchong + str(num) + '.jpg', dst)
        elif new_svm.predict(x1)[0] == 2:
            cv2.imwrite(path_fengmi + str(num) + '.jpg', dst)
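
Example #22 loads a pre-built visual vocabulary ('zuixinfl') and an SVM ('svm2.pickle') whose training is not shown. A hedged sketch of one way such a vocabulary could be produced with cv2.BOWKMeansTrainer (the cluster count and the SIFT choice are assumptions):

import cv2
import numpy as np

def build_vocabulary(image_paths, cluster_count=64):
    # cluster SIFT descriptors from the training images into a bag-of-words vocabulary
    sift = cv2.xfeatures2d.SIFT_create()
    trainer = cv2.BOWKMeansTrainer(cluster_count)
    for path in image_paths:
        img = cv2.imread(path, 0)
        _, descs = sift.detectAndCompute(img, None)
        if descs is not None:
            trainer.add(np.float32(descs))
    return trainer.cluster()  # (cluster_count, 128) float32 vocabulary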
Example #23
0
    def __init__(self):
        self.sift = cv2.SIFT_create()

        self.flann = cv2.FlannBasedMatcher(dict(algorithm = 1, trees = 5), dict(checks=50))
Example #24
0
def stitch_to_base(img1, img2):
    '''
    Stitch img2 onto the current base image img1.

    :param img1: the base image (BGR)
    :param img2: the next image to stitch onto the base
    :return: the stitched result is written to "map1/base_im.jpg"
    '''
    # Read the base image
    base_img = cv2.GaussianBlur(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY), (5, 5), 0)
    # Read in the next image
    next_img = cv2.GaussianBlur(cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY), (5, 5), 0)

    # Use the SIFT feature detector
    detector = cv2.xfeatures2d.SIFT_create()

    # Find key points in base image for motion estimation
    base_features, base_descs = detector.detectAndCompute(base_img, None)

    # Parameters for nearest-neighbor matching
    FLANN_INDEX_KDTREE = 1
    flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    matcher = cv2.FlannBasedMatcher(flann_params, {})

    print("\t Finding points...")
    # Find points in the next frame
    next_features, next_descs = detector.detectAndCompute(next_img, None)
    matches = matcher.knnMatch(next_descs, trainDescriptors=base_descs, k=2)
    print("\t Match Count: {}".format(len(matches)))

    matches_subset = []
    for m in matches:
        if len(m) == 2 and m[0].distance < m[1].distance * 0.75:
            matches_subset.append(m[0])

    print("\t Filtered Match Count: {}".format(len(matches_subset)))
    distance = 0.0

    for match in matches_subset:
        distance += match.distance
    print("\t Distance from Key Image: {}".format(distance))

    averagePointDistance = distance / float(len(matches_subset))
    print("\t Average Distance: {}".format(averagePointDistance))

    kp1 = []
    kp2 = []

    for match in matches_subset:
        kp1.append(base_features[match.trainIdx])
        kp2.append(next_features[match.queryIdx])

    p1 = np.array([k.pt for k in kp1])
    p2 = np.array([k.pt for k in kp2])

    H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)

    print("{0} / {1}  inliers/matched".format(np.sum(status), len(status)))

    H = H / H[2, 2]
    H_inv = linalg.inv(H)

    (min_x, min_y, max_x, max_y) = findDimensions(next_img, H_inv)

    # Adjust max_x and max_y by base img size
    max_x = max(max_x, base_img.shape[1])
    max_y = max(max_y, base_img.shape[0])

    move_h = np.matrix(np.identity(3), np.float32)

    if (min_x < 0):
        move_h[0, 2] += -min_x
        max_x += -min_x

    if (min_y < 0):
        move_h[1, 2] += -min_y
        max_y += -min_y

    mod_inv_h = move_h * H_inv

    img_w = int(math.ceil(max_x))
    img_h = int(math.ceil(max_y))

    print("New Dimensions: ", (img_w, img_h))

    # crop edges
    print("Cropping...")
    base_h, base_w = base_img.shape
    next_h, next_w = next_img.shape

    base_img_rgb = img1[5:(base_h - 5), 5:(base_w - 5)]
    next_img_rgb = img2[5:(next_h - 5), 5:(next_w - 5)]

    # Warp the new image given the homography from the old image
    base_img_warp = cv2.warpPerspective(base_img_rgb, move_h, (img_w, img_h))
    print("Warped base image")
    plt.imshow(base_img_warp)
    plt.show()


    next_img_warp = cv2.warpPerspective(next_img_rgb, mod_inv_h, (img_w, img_h))
    print("Warped next image")
    plt.imshow(next_img_warp)
    plt.show()

    # Put the base image on an enlarged palette
    enlarged_base_img = np.zeros((img_h, img_w, 3), np.uint8)

    (ret, data_map) = cv2.threshold(cv2.cvtColor(next_img_warp, cv2.COLOR_BGR2GRAY), 0, 255, cv2.THRESH_BINARY)

    # add base image
    enlarged_base_img = cv2.add(enlarged_base_img, base_img_warp, mask=np.bitwise_not(data_map), dtype=cv2.CV_8U)

    # add next image
    final_img = cv2.add(enlarged_base_img, next_img_warp, dtype=cv2.CV_8U)
    plt.imshow(final_img)
    plt.show()
    # Crop black edge
    final_gray = cv2.cvtColor(final_img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(final_gray, 1, 255, cv2.THRESH_BINARY)
    dino, contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    max_area = 0
    best_rect = (0, 0, 0, 0)

    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        deltaHeight = h - y
        deltaWidth = w - x

        area = deltaHeight * deltaWidth
        if (area > max_area and deltaHeight > 0 and deltaWidth > 0):
            max_area = area
            best_rect = (x, y, w, h)

    if (max_area > 0):
        final_img_crop = final_img[best_rect[1]:best_rect[1] + best_rect[3], best_rect[0]:best_rect[0] + best_rect[2]]

        final_img = final_img_crop
    # output
    final_filename = "map1/base_im.jpg"
    cv2.imwrite(final_filename, final_img)
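
Example #24 calls a findDimensions helper that is not included; a plausible stand-in (an assumption, inferred from how its return values are used above) projects the image corners through the homography and returns their bounding extents:

import cv2
import numpy as np

def findDimensions(image, homography):
    # project the four image corners and return (min_x, min_y, max_x, max_y)
    h, w = image.shape[:2]
    corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]]).reshape(-1, 1, 2)
    warped = cv2.perspectiveTransform(corners, np.asarray(homography, dtype=np.float64))
    xs, ys = warped[:, 0, 0], warped[:, 0, 1]
    return float(xs.min()), float(ys.min()), float(xs.max()), float(ys.max())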
Example #25
0
queryImage = cv2.imread('../images/bathory_album.jpg', 0)
trainingImage = cv2.imread('../images/bathory_vinyls.jpg', 0)

# create SIFT and detect/compute
# xfeatures2d is no longer usable in OpenCV versions after 3.4.0
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(queryImage, None)
kp2, des2 = sift.detectAndCompute(trainingImage, None)

# FLANN matcher parameters
# FLANN_INDEX_KDTREE = 0
indexParams = dict(algorithm=0, trees=5)
searchParams = dict(checks=50)  # or pass empty dictionary

flann = cv2.FlannBasedMatcher(indexParams, searchParams)

matches = flann.knnMatch(des1, des2, k=2)

# prepare an empty mask to draw good matches
matchesMask = [[0, 0] for i in range(len(matches))]

# David G. Lowe's ratio test, populate the mask
for i, (m, n) in enumerate(matches):
    if m.distance < 0.7 * n.distance:
        matchesMask[i] = [1, 0]

drawParams = dict(matchColor=(0, 255, 0),
                  singlePointColor=(255, 0, 0),
                  matchesMask=matchesMask,
                  flags=0)
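
Example #25 stops right after building drawParams; the usual next step (a hedged completion, not part of the original snippet) is to draw the masked matches and display them:

resultImage = cv2.drawMatchesKnn(queryImage, kp1, trainingImage, kp2,
                                 matches, None, **drawParams)
cv2.imshow('FLANN matches with ratio-test mask', resultImage)
cv2.waitKey(0)
cv2.destroyAllWindows()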
Example #26
0
def feature_match(files, target):

    #img1 = cv2.imread(target,0)   # qurey image
    tar_im = Image.open(target).convert('L')  # read target image as gray scale
    img1 = np.array(tar_im)

    for s in files:
        print "matching image on %s" % s
        img2 = cv2.imread(s, 0)
        #orb = cv2.ORB_create()
        #
        #kp1, des1 = orb.detectAndCompute(img1, None)
        #kp2, des2 = orb.detectAndCompute(img2, None)
        #
        #bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck = True)
        #
        #matches = bf.match(des1, des2)
        #
        #matches = sorted(matches, key = lambda x:x.distance)

        #img3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:6],None, flags = 2)
        #
        #cv2.imwrite('feature_detect_img.jpg', img3)
        #
        #plt.imshow(img3),plt.show()

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(img1, None)
        kp2, des2 = sift.detectAndCompute(img2, None)

        ### BFMatcher with default params
        ##bf = cv2.BFMatcher()
        ##matches = bf.knnMatch(des1,des2, k=2)

        # FLANN parameters
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)  # or pass empty dictionary

        flann = cv2.FlannBasedMatcher(index_params, search_params)

        matches = flann.knnMatch(des1, des2, k=2)

        # Apply ratio test
        good = []
        knnlst = []
        for m, n in matches:
            if m.distance < 0.75 * n.distance:
                good.append(m)
                knnlst.append([m])

        if len(good) > 20:
            # cv2.drawMatchesKnn expects list of lists as matches.

            #img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,knnlst,None,flags=2)   # match distances

            rows2 = img2.shape[0]
            cols2 = img2.shape[1]

            out = np.zeros((max([rows2]), cols2, 3), dtype='uint8')

            # Place the first image to the left
            #out[:rows1, :cols1] = np.dstack([img1])

            # Place the next image to the right of it
            out[:rows2, :cols2] = np.dstack([img2])

            chker = []

            for mat in good:
                # Get the matching keypoints for each of the images
                #img1_idx = mat.queryIdx
                img2_idx = mat.trainIdx

                # x - columns
                # y - rows
                #(x1,y1) = kp1[img1_idx].pt
                (x2, y2) = kp2[img2_idx].pt

                # Draw a small circle at both co-ordinates
                # radius 4
                # colour green
                # thickness = 1
                chker.append((x2, y2))
                cv2.circle(out, (int(x2), int(y2)), 3, (0, 255, 0), 1)

            #plt.imshow(out,),plt.show()
            #cv2.imwrite('feature_out_img.jpg', out)

            #cv2.imwrite('feature_detect_img.jpg', img3)

            #plt.imshow(img3),plt.show()

            if chker:
                min_x = int(min(chker, key=lambda x: x[0])[0])
                max_x = int(max(chker, key=lambda x: x[0])[0])
                min_y = int(min(chker, key=lambda x: x[1])[1])
                max_y = int(max(chker, key=lambda x: x[1])[1])

                top_left = (min_x, min_y)
                bottom_right = (max_x, max_y)
                center_point = ((min_x + max_x) // 2, (min_y + max_y) // 2)

                cv2.rectangle(out, top_left, bottom_right, (0, 255, 255), 2)
                cv2.circle(out, center_point, 5, (255, 0, 0), 1)

                cv2.imwrite('feature_box_img.jpg', out)

                #plt.imshow(out,),plt.show()
                data = {
                    "name": s,
                    "centerPoint": center_point,
                    "topLeft": top_left,
                    "bottomRight": bottom_right
                }
                return data
    print "nothing mached"

    data = {"Image": None, 'center_point': (0, 0)}

    return (data)
Example #27
0
 def __init__(self):
     self.detector = cv.ORB_create(nfeatures=100)
     self.matcher = cv.FlannBasedMatcher(
         flann_params, {})  # bug : need to pass empty dict (#1329)
     self.targets = []
     self.frame_points = []
def main():
    # FLANN parameters
    FLANN_INDEX_LSH = 6
    index_params = dict(
        algorithm=FLANN_INDEX_LSH,
        table_number=6,  # 12
        key_size=16,  # 20
        multi_probe_level=1)  #2
    search_params = dict(checks=50)  # or pass empty dictionary

    flann = cv.FlannBasedMatcher(index_params, search_params)

    orb = cv.ORB_create()
    forward_template = "/home/tinker/catkin_ws/dataset/marathon/marker_images/template/forward.png"
    right_template = "/home/tinker/catkin_ws/dataset/marathon/marker_images/template/right.png"
    left_template = "/home/tinker/catkin_ws/dataset/marathon/marker_images/template/left.png"
    iteration = 1
    while True:
        test_file = "/home/tinker/catkin_ws/dataset/marathon/marker_images/right/right (" + str(
            iteration) + ").png"
        # test_file = "/home/tinker/catkin_ws/dataset/marathon/marker_images/line/line (8).png"
        forward_img = cv.imread(forward_template, 0)
        right_img = cv.imread(right_template, 0)
        left_img = cv.imread(left_template, 0)

        test_img = cv.imread(test_file, 0)

        kp_forward, des_forward = orb.detectAndCompute(forward_img, None)
        kp_left, des_left = orb.detectAndCompute(left_img, None)
        kp_right, des_right = orb.detectAndCompute(right_img, None)
        kp_test, des_test = orb.detectAndCompute(test_img, None)

        right_matches = []
        right_matches_mask = []
        total_right_match = 0
        if des_right is not None and des_test is not None:
            right_matches = flann.knnMatch(des_right, des_test, k=2)
            right_matches_mask = [[0, 0] for i in range(len(right_matches))]
            for i in range(len(right_matches)):
                if len(right_matches[i]) > 1:
                    m, n = right_matches[i]
                    if m.distance < 0.9 * n.distance:
                        right_matches_mask[i] = [1, 0]
                        total_right_match += 1

        draw_params = dict(matchColor=(0, 255, 0),
                           singlePointColor=(255, 0, 0),
                           matchesMask=right_matches_mask,
                           flags=cv.DrawMatchesFlags_DEFAULT)
        right_result = cv.drawMatchesKnn(right_img, kp_right, test_img,
                                         kp_test, right_matches, None,
                                         **draw_params)
        cv.imshow("right_result", right_result)

        # Left
        left_matches = []
        left_matches_mask = []
        total_left_match = 0
        if des_left is not None and des_test is not None:
            left_matches = flann.knnMatch(des_left, des_test, k=2)
            left_matches_mask = [[0, 0] for i in range(len(left_matches))]
            for i in range(len(left_matches)):
                if len(left_matches[i]) > 1:
                    m, n = left_matches[i]
                    if m.distance < 0.9 * n.distance:
                        left_matches_mask[i] = [1, 0]
                        total_left_match += 1

        draw_params = dict(matchColor=(0, 255, 0),
                           singlePointColor=(255, 0, 0),
                           matchesMask=left_matches_mask,
                           flags=cv.DrawMatchesFlags_DEFAULT)
        left_result = cv.drawMatchesKnn(left_img, kp_left, test_img, kp_test,
                                        left_matches, None, **draw_params)
        cv.imshow("left_result", left_result)

        # Forward
        forward_matches = []
        forward_matches_mask = []
        total_forward_match = 0
        if des_forward is not None and des_test is not None:
            forward_matches = flann.knnMatch(des_forward, des_test, k=2)
            forward_matches_mask = [[0, 0]
                                    for i in range(len(forward_matches))]
            for i in range(len(forward_matches)):
                if len(forward_matches[i]) > 1:
                    m, n = forward_matches[i]
                    if m.distance < 0.9 * n.distance:
                        forward_matches_mask[i] = [1, 0]
                        total_forward_match += 1

        draw_params = dict(matchColor=(0, 255, 0),
                           singlePointColor=(255, 0, 0),
                           matchesMask=forward_matches_mask,
                           flags=cv.DrawMatchesFlags_DEFAULT)
        forward_result = cv.drawMatchesKnn(forward_img, kp_forward, test_img,
                                           kp_test, forward_matches, None,
                                           **draw_params)
        cv.imshow("forward_result", forward_result)

        match_result = [
            total_forward_match, total_right_match, total_left_match
        ]
        max_idx = match_result.index(max(match_result))

        if (max_idx == 0):
            print("Forward")
        elif (max_idx == 1):
            print("Right")
        elif (max_idx == 2):
            print("Left")

        # if total_left_match > total_right_match:
        #     print("Left")
        # else:
        #     print("Right")

        k = cv.waitKey(1)
        if k == 27:
            break
        elif k == ord('n'):
            iteration += 1
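
Example #27 repeats the same count-the-ratio-test-matches logic three times (forward, right, left); a hedged sketch of the same idea factored into a helper (names are illustrative, not the author's):

import cv2 as cv

def count_ratio_matches(matcher, template_desc, test_desc, ratio=0.9):
    # count knn matches that pass the Lowe ratio test between two ORB descriptor sets
    if template_desc is None or test_desc is None:
        return 0
    pairs = matcher.knnMatch(template_desc, test_desc, k=2)
    return sum(1 for p in pairs if len(p) == 2 and p[0].distance < ratio * p[1].distance)

def classify_direction(matcher, descriptors_by_label, test_desc):
    # return the label whose template collects the most ratio-test matches
    counts = {label: count_ratio_matches(matcher, desc, test_desc)
              for label, desc in descriptors_by_label.items()}
    return max(counts, key=counts.get), counts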
Example #29
0
            except Exception as e:
                print(e)
                continue

            # img=cv.drawKeypoints(img1, kp_ori, img1, flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
            # cv.imwrite('sift_keypoints_pan.jpg', img)

            # img=cv.drawKeypoints(img2, kp_ref, img2, flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
            # cv.imwrite('sift_keypoints_ref.jpg', img)

            # FLANN parameters
            FLANN_INDEX_KDTREE = 1
            index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
            search_params = dict(checks=50)  # or pass empty dictionary

            flann = cv.FlannBasedMatcher(index_params, search_params)
            matches = flann.knnMatch(des_ori, des_ref, k=2)

            # matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)
            # matches = matcher.knnMatch(des_ori, des_ref, 2)

            # BFMatcher with default params
            # bf = cv.BFMatcher()
            # matches = bf.knnMatch(des_ori, des_ref, k=2)

            # good = [m1 for (m1, m2) in matches if m1.distance * 1.5 < m2.distance]
            good = [
                m1 for (m1, m2) in matches if m1.distance < 0.7 * m2.distance
            ]

            good_matches = sorted(good, key=lambda x: x.distance)
Example #30
0
    def SLOT_query_camera(self):

        sift = cv2.xfeatures2d.SIFT_create()

        # Camera Frame
        ret, frame = self._camera_device.read()  # get camera image
        kp_frame, desc_frame = sift.detectAndCompute(
            frame, None)  # get keypoints and descriptions
        frame = cv2.drawKeypoints(
            frame, kp_frame,
            frame)  # this will draw the keypoints on the camera captured frame
        pixmap_frame = self.convert_cv_to_pixmap(frame)
        self.live_image_label.setPixmap(
            pixmap_frame
        )  # this can only print pixelmaps, prints frame with keypoints

        # Selected Image
        # #print(image_path)
        img = cv2.imread(image_path, 0)
        kp_image, desc_image = sift.detectAndCompute(img, None)
        #img = cv2.drawKeyPoints(img, kp_image, img) # draw the keypoints on our image, pass it where you want to draw, the keypoints, and outer image (img)

        # Feature Matching
        index_params = dict(algorithm=0, trees=5)
        search_params = dict()
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(desc_image, desc_frame, k=2)

        good_points = []
        # m and n are the best and second-best DMatch results for each descriptor of the query image
        for m, n in matches:
            # to avoid many false results, take descriptors that have short distances between them
            # play with this constant in front of n.distance: 0.6, 0.8
            if m.distance < 0.6 * n.distance:
                good_points.append(m)

        img3 = cv2.drawMatches(img, kp_image, frame, kp_frame, good_points,
                               frame)
        cv2.imshow("Matches", img3)

        # Homography
        # if we find at least 10 matches, we will draw homography
        # 'query' refers to the reference image (img); 'train' refers to the camera-captured frame
        if len(good_points) > 10:
            # queryIdx gives us the points of the query image (from our m array)
            # the .reshape just changes the shape of the numpy array
            query_pts = np.float32([
                kp_image[m.queryIdx].pt for m in good_points
            ]).reshape(-1, 1, 2)
            train_pts = np.float32([
                kp_frame[m.trainIdx].pt for m in good_points
            ]).reshape(-1, 1, 2)

            # homography matrix mapping points from the query image into the camera frame
            matrix, mask = cv2.findHomography(query_pts, train_pts, cv2.RANSAC,
                                              5.0)
            matches_mask = mask.ravel().tolist(
            )  # extract points from mask and put into a list

            # Perspective transforms, helps with homography
            h, w = img.shape  # height and width of original image
            #print(h)
            #print(w)

            pts = np.float32([[0, 0], [0, h], [w, h], [w, 0]]).reshape(
                -1, 1, 2
            )  # points gets h and w of image. does not work with int32, but float32 works
            dst = cv2.perspectiveTransform(pts, matrix)

            # convert to an integer for pixel pointers, (you can't point to a decimal of a pixel)
            # True is for "closing the lines"
            # next is the colour we select, in bgr, we have selected blue
            # thickness = 3
            homography = cv2.polylines(frame, [np.int32(dst)], True,
                                       (255, 0, 0), 3)

            cv2.imshow("Homography", homography)
        else:
            cv2.imshow("Regular Frame", frame)