Example n. 1
            cv2.line(vis, (x1-r, y1+r), (x1+r, y1-r), col, thickness)
            cv2.line(vis, (x2-r, y2-r), (x2+r, y2+r), col, thickness)
            cv2.line(vis, (x2-r, y2+r), (x2+r, y2-r), col, thickness)
    vis0 = vis.copy()
    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            cv2.line(vis, (x1, y1), (x2, y2), green)

    cv2.imshow(win, vis)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

sift = cv2.xfeatures2d.SIFT_create()
#sift = cv2.SURF()  # SURF/ORB were not used: the images are too small and yield too few keypoints
#sift = cv2.ORB()
bf = cv2.BFMatcher(cv2.NORM_L2)  # NORM_L2 is appropriate for SIFT/SURF descriptors; see http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_feature2d/py_matcher/py_matcher.html?highlight=bfmatcher
#bf = cv2.BFMatcher(cv2.NORM_HAMMING2)
def compare_SIFT(im1,im2):
    kp1,des1 = sift.detectAndCompute(im1,None)
    kp2,des2 = sift.detectAndCompute(im2,None)
    #print 'kp2',kp2
    if not kp2:
        dprint('kp2 has no keypoints')
        return
    matches = bf.knnMatch(des1, des2, k=2)  # with k=2, knnMatch returns the two nearest descriptors per query; a match is only accepted when the best distance is sufficiently smaller than the second-best (Lowe's ratio test)
    #print matches
    dprint('kp1:', len(kp1))
    dprint('kp2:', len(kp2))
    p1,p2,kp_pairs = filter_matches(kp1,kp2,matches)
    dprint( "matches",len(kp_pairs))
    ratio = 0
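
# The filter_matches helper called above is not part of this snippet. A minimal
# sketch, modeled on OpenCV's find_obj.py sample (the 0.75 ratio is an assumption;
# numpy is assumed to be imported as np):
def filter_matches(kp1, kp2, matches, ratio=0.75):
    mkp1, mkp2 = [], []
    for m in matches:
        # keep a match only when the best neighbour is clearly closer than the second
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            mkp1.append(kp1[m[0].queryIdx])
            mkp2.append(kp2[m[0].trainIdx])
    p1 = np.float32([kp.pt for kp in mkp1])
    p2 = np.float32([kp.pt for kp in mkp2])
    kp_pairs = list(zip(mkp1, mkp2))
    return p1, p2, kp_pairs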
Example n. 2
import numpy as np

img_ = cv2.imread('img1_1.jpg')

img1 = cv2.cvtColor(img_, cv2.COLOR_BGR2GRAY)

img = cv2.imread('img2_2.jpg')

img2 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

sift = cv2.xfeatures2d.SIFT_create()

kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

match = cv2.BFMatcher()
matches = match.knnMatch(des1, des2, k=2)

good = []
for m, n in matches:
    if m.distance < 0.03 * n.distance:
        good.append(m)

draw_params = dict(matchColor=(0, 255, 0), singlePointColor=None, flags=2)

img3 = cv2.drawMatches(img_, kp1, img, kp2, good, None, **draw_params)

MIN_MATCH_COUNT = 10
if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
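
    # The snippet is cut off here; a hedged continuation, mirroring the
    # findHomography / perspectiveTransform flow used in later examples
    # (the RANSAC threshold of 5.0 is an assumption):
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    h, w = img1.shape
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)
    img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)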
Example n. 3
img1 = cv2.imread('../images/mountain1.png')
img2 = cv2.imread('../images/mountain2.png')
sift = cv2.xfeatures2d.SIFT_create()
#extract sift keypoints and descriptors
(kp1, des1) = sift.detectAndCompute(img1, None)
(kp2, des2) = sift.detectAndCompute(img2, None)
# show keypoints
out1 = cv2.drawKeypoints(img1, kp1, None)
out2 = cv2.drawKeypoints(img2, kp2, None)
cv2.imshow("Keypoints for Image-1 ( press enter to continue )", out1)
cv2.imshow("Keypoints for Image-2 ( press enter to continue )", out2)
#print(des1,des2)
cv2.waitKey(0)
cv2.destroyAllWindows()  

bf = cv2.BFMatcher()
matches12 = bf.knnMatch(des1,des2, k=2)
matches21 = bf.knnMatch(des2,des1, k=2)
#print(matches12.shape,matches21.shape)
# own implementation of matching
# filter matches by ratio test

t = 0.4
validratio12 = {}
for p in matches12:
    if (p[0].distance/p[1].distance) <= t:
        validratio12[p[0].queryIdx] = (p[0].trainIdx,p)
validratio21 = {}  
for p in matches21:
    if (p[0].distance/p[1].distance) <= t:
       validratio21[p[0].queryIdx] = (p[0].trainIdx,p)
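
# The snippet ends with the two one-way ratio tests; a hedged sketch of the
# symmetric cross-check that typically follows (variable names are assumptions):
mutual = []
for q12, (t12, pair12) in validratio12.items():
    # keep a match only when image 2's keypoint maps back to the same image 1 keypoint
    if t12 in validratio21 and validratio21[t12][0] == q12:
        mutual.append(pair12[0])
print("mutually consistent matches:", len(mutual))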
Example n. 4
 def __init__(self):
     self.orb = cv2.ORB_create(100)
     self.bf = cv2.BFMatcher(cv2.NORM_HAMMING)
     self.last = None
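
 # The class body is cut off here; a hedged sketch of the per-frame matching
 # method such a class usually pairs with (the method name, the 0.75 ratio, and
 # the use of self.last as a {'kps', 'des'} dict are assumptions):
 def extract(self, img):
     kps, des = self.orb.detectAndCompute(img, None)
     ret = []
     if self.last is not None and des is not None:
         matches = self.bf.knnMatch(des, self.last['des'], k=2)
         for pair in matches:
             if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:
                 ret.append((kps[pair[0].queryIdx],
                             self.last['kps'][pair[0].trainIdx]))
     self.last = {'kps': kps, 'des': des}
     return ret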
Example n. 5
        p2 = (int(points2[i][0] + W1 + 30), int(points2[i][1]))
        cv2.line(visImg, p1, p2, color)

    return visImg


img1 = cv2.imread("../imgs/book1.jpg", 0)
img2 = cv2.imread("../imgs/book3.jpg", 0)

#compute SIFT
sift = cv2.xfeatures2d.SIFT_create()
key1, des1 = sift.detectAndCompute(img1, None)
key2, des2 = sift.detectAndCompute(img2, None)

#matching and trimming
matches = cv2.BFMatcher(cv2.NORM_L2).match(des1, des2)
threshold = np.array([m.distance for m in matches]).mean() * 1.2
matches_trim = [m for m in matches if m.distance < threshold]

point1 = np.array([np.array(key1[m.queryIdx].pt) for m in matches_trim])
point2 = np.array([np.array(key2[m.trainIdx].pt) for m in matches_trim])
imgMatch = visMatching(img1, img2, point1, point2)

imgVis1 = visKeyPoints(img1, key1)
imgVis2 = visKeyPoints(img2, key2)

# wait key input
cv2.imshow("key points1", imgVis1)
cv2.imshow("key points2", imgVis2)
cv2.imshow("key match", imgMatch)
cv2.waitKey(0)
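
# The visKeyPoints helper (and the top of visMatching) is not included in this
# snippet; a minimal sketch of visKeyPoints, assuming it simply wraps
# cv2.drawKeypoints:
def visKeyPoints(img, keypoints):
    return cv2.drawKeypoints(img, keypoints, None,
                             flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)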
Example n. 6
    def __init__(self):
        self.next_state = 'None'
        self.number_of_egg = 11
        self.hatched_egg = 0
        self._detect_frame_count = 0
        self._good = []
        self._good_without_list = []
        self._bf = cv2.BFMatcher()
        self._breeder_house_flg = 0
        self.send_command = 'None'
        self._send_command_enb = 0
        self._control_frame_count = 0
        self._temp_y = 408
        self.run_control_state = 'None'
        self.a_breeder_count = 0
        self.run_l_count = 0
        self._sel_poke_flg = 0

        self._breeder_comment = cv2.imread("data\\breeder_comment.png")
        self._sora_sel_img = cv2.imread("data\\sora_sel.png")

        self.cut_frame_h = 237
        self.cut_frame_w = 1280
        self.cross_arm_breeder1 = cv2.imread("data\\a_breeder1.png")
        self.cross_arm_breeder2 = cv2.imread("data\\a_breeder2.png")
        self.cross_arm_breeder3 = cv2.imread("data\\a_breeder3.png")
        self.breeder1 = cv2.imread("data\\breeder1.png")
        self.breeder2 = cv2.imread("data\\breeder2.png")
        self.breeder3 = cv2.imread("data\\breeder3.png")
        self.f_a_breeder = cv2.imread("data\\f_a_breeder_hand.png")
        self.f_breeder3 = cv2.imread("data\\f_breeder_hand.png")
        self.menu_sel_poke = cv2.imread("data\\menu_sel_poke.png")
        self.gray_cross_arm_breeder1 = cv2.cvtColor(self.cross_arm_breeder1,
                                                    cv2.COLOR_RGB2GRAY)
        self.gray_cross_arm_breeder2 = cv2.cvtColor(self.cross_arm_breeder2,
                                                    cv2.COLOR_RGB2GRAY)
        self.gray_cross_arm_breeder3 = cv2.cvtColor(self.cross_arm_breeder3,
                                                    cv2.COLOR_RGB2GRAY)
        self.gray_breeder1 = cv2.cvtColor(self.breeder1, cv2.COLOR_RGB2GRAY)
        self.gray_breeder2 = cv2.cvtColor(self.breeder2, cv2.COLOR_RGB2GRAY)
        self.gray_breeder3 = cv2.cvtColor(self.breeder3, cv2.COLOR_RGB2GRAY)
        self.gray_f_a_breeder = cv2.cvtColor(self.f_a_breeder,
                                             cv2.COLOR_RGB2GRAY)
        self.gray_f_breeder = cv2.cvtColor(self.f_breeder3, cv2.COLOR_RGB2GRAY)
        self.ah1, self.aw1 = self.gray_cross_arm_breeder1.shape
        self.ah2, self.aw2 = self.gray_cross_arm_breeder2.shape
        self.ah3, self.aw3 = self.gray_cross_arm_breeder3.shape
        self.h1, self.w1 = self.gray_breeder1.shape
        self.h2, self.w2 = self.gray_breeder2.shape
        self.h3, self.w3 = self.gray_breeder3.shape
        self.hf, self.wf = self.gray_f_breeder.shape
        #self.gray_cross_arm_breeder1 = np.array(self.gray_cross_arm_breeder1, dtype="float")
        #self.gray_cross_arm_breeder2 = np.array(self.gray_cross_arm_breeder2, dtype="float")
        #self.gray_cross_arm_breeder3 = np.array(self.gray_cross_arm_breeder3, dtype="float")
        self.mu_t1 = np.mean(self.gray_cross_arm_breeder1)
        self.mu_t2 = np.mean(self.gray_cross_arm_breeder2)
        self.mu_t3 = np.mean(self.gray_cross_arm_breeder3)
        self.temp1 = self.gray_cross_arm_breeder1 - self.mu_t1
        self.temp2 = self.gray_cross_arm_breeder2 - self.mu_t2
        self.temp3 = self.gray_cross_arm_breeder3 - self.mu_t3
        #self.dst = 0
        if VolatileClassRun.ALGOLISM == 'AKAZE':
            VolatileClassRun.MIN_MATCH_COUNT = 20  # if this is too low, findHomography returns None
            VolatileClassRun.ratio = 0.7
            self._akaze = cv2.AKAZE_create()
            self._kp1, self._des1 = self._akaze.detectAndCompute(
                VolatileClassRun.breeder_house_img, None)
        elif VolatileClassRun.ALGOLISM == 'SURF':
            VolatileClassRun.MIN_MATCH_COUNT = 30  # if this is too low, findHomography returns None
            VolatileClassRun.ratio = 0.5
            self._surf = cv2.xfeatures2d.SURF_create(400)
            self._kp1, self._des1 = self._surf.detectAndCompute(
                VolatileClassRun.breeder_house_img, None)
        elif VolatileClassRun.ALGOLISM == 'USURF':
            VolatileClassRun.MIN_MATCH_COUNT = 18  # if this is too low, findHomography returns None
            VolatileClassRun.ratio = 0.6
            self._surf = cv2.xfeatures2d.SURF_create(400)
            self._surf.setUpright(True)
            self._kp1, self._des1 = self._surf.detectAndCompute(
                VolatileClassRun.breeder_house_img, None)
        elif VolatileClassRun.ALGOLISM == 'FAST':
            VolatileClassRun.MIN_MATCH_COUNT = 30  # if this is too low, findHomography returns None
            VolatileClassRun.ratio = 0.5
            self._fast = cv2.FastFeatureDetector_create()
            self._brief = cv2.xfeatures2d.BriefDescriptorExtractor_create()
            self._kp1 = self._fast.detect(VolatileClassRun.breeder_house_img,
                                          None)
            self._kp1, self._des1 = self._brief.compute(
                VolatileClassRun.breeder_house_img, self._kp1)
        self.matches = self._bf.knnMatch(self._des1, self._des1, k=2)
Example n. 7
def pose_estimation():
    global f, B
    global frame_L, frame_R, frame_L_prev, frame_R_prev
    global left_image, right_image, img_L, img_R
    global left_featured_image, right_featured_image, matched_featured_image
    global img_L, frame_L, frame_L_prev, dt_L
    global flow_left_matched_featured_image, flow_left_image
    global height, width, scale

    frame_L_prev = frame_L
    frame_R_prev = frame_R

    try:
        img_L = bridge.imgmsg_to_cv2(left_image, "bgr8")
    except CvBridgeError as e:
        print(e)

    try:
        img_R = bridge.imgmsg_to_cv2(right_image, "bgr8")
    except CvBridgeError as e:
        print(e)

    img_height = len(img_L)
    img_width = len(img_L[0])
    # print(img_height)
    # print(img_width)

    #Resize image
    scale = 1
    width = int(img_L.shape[1] * scale)
    height = int(img_L.shape[0] * scale)
    dim = (width, height)  #can also just specify desired dimensions
    frame_L = cv2.resize(img_L, dim, interpolation=cv2.INTER_AREA)
    frame_R = cv2.resize(img_R, dim, interpolation=cv2.INTER_AREA)

    #Convert from BGR to gray colorspace
    frame_L = cv2.cvtColor(frame_L, cv2.COLOR_BGR2GRAY)
    frame_R = cv2.cvtColor(frame_R, cv2.COLOR_BGR2GRAY)

    img1 = frame_L.copy()
    img2 = frame_R.copy()

    # Initiate ORB detector
    orb = cv2.ORB_create()

    # find the keypoints and descriptors with ORB
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)

    # create BFMatcher object
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

    # Match descriptors.
    matches = bf.match(des1, des2)

    # Sort them in the order of their distance.
    matches = sorted(matches, key=lambda x: x.distance)

    top_matches = matches[:20]

    plot_image = cv2.drawMatches(img1,
                                 kp1,
                                 img2,
                                 kp2,
                                 top_matches,
                                 None,
                                 flags=2)

    # Initialize lists
    list_kp1 = []
    list_kp2 = []
    list_disp = []
    list_z = []

    # For each match...
    for mat in top_matches:

        # Get the matching keypoints for each of the images
        img1_idx = mat.queryIdx
        img2_idx = mat.trainIdx

        # x - columns
        # y - rows
        # Get the coordinates
        (x1, y1) = kp1[img1_idx].pt
        (x2, y2) = kp2[img2_idx].pt
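        # Depth from stereo disparity: z = f * B / d, where d = x2 - x1 is the
        # horizontal pixel offset between the matched left/right keypoints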
        if (x2 - x1 > 0.001):
            z = (f * B) / (x2 - x1)
            list_z.append(z)

        # Append to each list
        list_kp1.append((x1, y1))
        list_kp2.append((x2, y2))
        list_disp.append(x2 - x1)

    # mean_z = 0.0
    # if len(list_z)>0:
    # 	mean_z = sum(list_z)/len(list_z)
    # print(mean_z)

    matched_featured_image = bridge.cv2_to_imgmsg(plot_image, "8UC3")

    img3 = frame_L_prev.copy()

    # find the keypoints and descriptors with ORB
    kp3, des3 = orb.detectAndCompute(img3, None)

    if des3 is not None:
        # Match descriptors.
        matches = bf.match(des1, des3)

        # Sort them in the order of their distance.
        matches = sorted(matches, key=lambda x: x.distance)

        if (len(matches) > 100):
            top_matches = matches[:50]
        else:
            top_matches = matches[:20]
        # draw_params = dict(matchColor = (0,255,0),
        #                    singlePointColor = (255,0,0),
        #                    matchesMask = matchesMask,
        #                    flags = 0)

        # img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,matches,None,**draw_params)

        plot_image = cv2.drawMatches(img1,
                                     kp1,
                                     img3,
                                     kp3,
                                     top_matches,
                                     None,
                                     flags=2)
        flow_left_matched_featured_image = bridge.cv2_to_imgmsg(
            plot_image, "8UC3")

        plot_image = img_L.copy()

        # Initialize lists
        list_kp1 = []
        list_kp2 = []
        list_vx_img = []
        list_vy_img = []

        # For each match...
        for mat in top_matches:

            # Get the matching keypoints for each of the images
            img1_idx = mat.queryIdx
            img2_idx = mat.trainIdx

            # x - columns
            # y - rows
            # Get the coordinates
            (x1, y1) = kp1[img1_idx].pt
            (x2, y2) = kp3[img2_idx].pt
            vx_img = (x2 - x1) / dt_L
            vy_img = (y2 - y1) / dt_L

            # Append to each list
            list_kp1.append((x1, y1))
            list_kp2.append((x2, y2))

            list_vx_img.append(vx_img)
            list_vy_img.append(vy_img)

            cv2.arrowedLine(plot_image, (int(x1), int(y1)), (int(x2), int(y2)),
                            (0, 0, 255),
                            thickness=2,
                            line_type=8,
                            shift=0,
                            tipLength=0.5)

        # print(dt_L)
        # print(sum(list_vx_img)/len(list_vx_img))

        flow_left_image = bridge.cv2_to_imgmsg(plot_image, "8UC3")
Example n. 8
def matched_positions(img1, img2, k=3, n=2, unique=False, order=0):
    '''
    Tip: works better for scale changes within 0.5-2x.
    Args:
        img1: template image
        img2: target image
        k: number of candidate matches returned per feature point
        n: ratio filter; matches with distance > n * min_distance are discarded
        unique: whether the object occurs only once in the target image
        order: index appended to the output file name
    Returns:
        positions: positions (in the target image) of the retained matches
    Side effects:
        writes result_with_bb{order}.png showing the matches with bounding boxes
    '''
    # Initiate SIFT detector
    if unique:
        k = 1
    sift = cv.xfeatures2d.SIFT_create()
    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # BFMatcher with default params
    print("# of keypoints: ", len(kp1))
    bf = cv.BFMatcher()
    matches = bf.knnMatch(des1, des2, k)
    matches_flat = []
    # flat the match list
    for match in matches:
        for match_point in match:
            matches_flat.append(match_point)
    matches_flat = sorted(matches_flat, key=lambda x: x.distance)
    max_distance = matches_flat[len(matches_flat) - 1].distance
    min_distance = matches_flat[0].distance
    good = []
    for match in matches_flat:
        if match.distance <= max(0.02, n * min_distance):
            good.append(match)
    #extract position in the target image
    if len(good) < 5:
        good = matches_flat[:5]

    if not unique:
        # materialize as lists so they can be reused and indexed below (Python 3 map is lazy)
        positions_map = list(
            map(lambda x: (x.queryIdx, x.trainIdx, kp2[x.trainIdx].pt), good))
        positions = list(map(lambda x: x[2], positions_map))
        bandwidth = max(50,
                        estimate_bandwidth(positions, quantile=0.3, n_jobs=-1))
        if (len(good) <= 20):
            bandwidth = min(50, bandwidth)
        img1_width_up = 2 * max(img1.shape)
        print("upper_width", img1_width_up)
        bandwidth = min(img1_width_up, bandwidth)
        img1_width_low = max(img1.shape)
        bandwidth = max(img1_width_low, bandwidth)
        print("lower_width", img1_width_low)
        print('bandwidth: ', bandwidth)
        ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
        ms.fit(positions)
        labels = ms.labels_
        labels_unique = np.unique(labels)
        print("# of clusters: ", len(labels_unique))
        print("labels: ", labels)
        n_clusters = len(labels_unique)
        matched_group = []
        for i in range(0, n_clusters):
            matched_group.append({'query': [], 'train': []})
        for i in range(0, len(positions_map)):
            for j in range(0, n_clusters):
                if labels[i] == j:
                    matched_group[j]['query'].append(
                        kp1[positions_map[i][0]].pt)
                    matched_group[j]['train'].append(
                        kp2[positions_map[i][1]].pt)
        h, w = img1.shape
        corners = [[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]
        img3 = cv.drawMatchesKnn(img1, kp1, img2, kp2, [good], None, flags=2)
        for i in range(0, n_clusters):
            if len(matched_group[i]['query']) >= 5:
                try:
                    box = produce_bounding_box(corners,
                                               matched_group[i]['query'],
                                               matched_group[i]['train'])
                    # translate the box to the correct positions
                    box = list(map(lambda x: [[x[0][0] + w, x[0][1]]], box))
                    img3 = cv.polylines(img3, [np.int32(box)], True, 255, 3,
                                        cv.LINE_AA)
                except:
                    print("Not enough information to produce the bounding box")
        print("result_with_bb{}.png".format(order))
        cv.imwrite("result_with_bb{}.png".format(order), img3)
        return positions
    else:
        # make every matched point has a unique query point,use the matched point with best distance
        output = []
        present_train_idx = []
        present_query_idx = []
        for element in good:
            if (element.trainIdx
                    not in present_train_idx) and (element.queryIdx
                                                   not in present_query_idx):
                present_train_idx.append(element.trainIdx)
                present_query_idx.append(element.queryIdx)
                output.append(element)
        good = output
        # materialize as lists so positions_map can be traversed more than once
        positions_map = list(
            map(lambda x: (x.queryIdx, x.trainIdx, kp2[x.trainIdx].pt), good))
        positions = list(map(lambda x: x[2], positions_map))
        query_positions = list(map(lambda x: kp1[x[0]].pt, positions_map))
        img3 = cv.drawMatchesKnn(img1, kp1, img2, kp2, [good], None, flags=2)
        h, w = img1.shape
        print(h, w)
        corners = [[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]
        box = produce_bounding_box(corners, query_positions, positions)
        box = list(map(lambda x: [[x[0][0] + w, x[0][1]]], box))
        img3 = cv.polylines(img3, [np.int32(box)], True, 255, 3, cv.LINE_AA)
        print("result_with_bb{}.png".format(order))
        cv.imwrite("result_with_bb{}.png".format(order), img3)
        return positions
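
# produce_bounding_box is not defined in this snippet; a minimal sketch that is
# consistent with how it is called above (estimate a homography from the matched
# template points to the target points, then project the template corners):
def produce_bounding_box(corners, query_pts, train_pts):
    src = np.float32(query_pts).reshape(-1, 1, 2)
    dst = np.float32(train_pts).reshape(-1, 1, 2)
    H, mask = cv.findHomography(src, dst, cv.RANSAC, 5.0)
    if H is None:
        raise ValueError("homography estimation failed")
    return cv.perspectiveTransform(np.float32(corners).reshape(-1, 1, 2), H)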
Example n. 9
def sift_det(image,source):
    # TODO:
    # Sets up match count between images for sensitivity of detection - choose your value!
    MIN_MATCH_COUNT = 10

    # Default return values so the function can still return when nothing is detected
    redRect = None
    greenRect = None

    # If VideoCapture(feed) doesn't work, manually try -1, 0, 1, 2, 3 (if none of those work, 
    # the webcam's not supported!)
    #cam = cv2.VideoCapture(args["source"])

    # Reads in the image
    img1 = cv2.imread(image, 0)                      

    """ # Labels the image as the name passed in    
    if args["label"] is not None:
        label = args["label"]
    else:
        # Takes the name of the image as the name
        if image[:2] == "./":
            label = label = (image.split("/"))[2]
        else:
            label = image[2:-4] """

    ################################################### Set up Feature Detection

    # Create a the SIFT Detector Object
    try:
        #delete! sift = cv2.xfeatures2d.SIFT_create()
        orb = cv2.ORB_create()
    except AttributeError:
        print("Install 'opencv-contrib-python' for access to the xfeatures2d module")

    # Compute keypoints
    kp, des = orb.detectAndCompute(img1, None)

    FLANN_INDEX_KDTREE = 0
    # Option of changing 'trees' and 'checks' values for different levels of accuracy
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)    
    search_params = dict(checks = 50)                                 

    # Fast Library for Approximate Nearest Neighbor Algorithm
    # Creates FLANN object for use below
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck = True)
    #delete! flann = cv2.FlannBasedMatcher(index_params, search_params)

 
        #ret_val, frame = cam.read()
    frame = source
    """ if frame is None:             # Did we get an image at all?
        continue """

    ################################################### Shape Computation

    # TODO:
    # What color space does OpenCV read images in, and what color space do we want to process in?
    # Check out cvtColor! <https://docs.opencv.org/2.4/modules/imgproc/doc/miscellaneous_transformations.html>
    # Read in the image from the camera 
    img = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    
    # TODO: 
    # Set up your HSV threshold bounds - [Hue, Saturation, Value]
    lower = np.array([0, 0, 100], dtype = "uint8")
    upper = np.array([100, 10, 90], dtype = "uint8")     

    # TODO: 
    # Check inRange() <https://docs.opencv.org/3.0-beta/modules/core/doc/operations_on_arrays.html?highlight=inrange#invert>
    # Create mask for image with overlapping values
    mask = cv2.inRange(img, lower, upper)

    # TODO:
    # What parameters work best for thresholding? <https://docs.opencv.org/2.4/modules/imgproc/doc/miscellaneous_transformations.html?highlight=adaptivethreshold>
    imgThresh = cv2.adaptiveThreshold(mask, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY, 3, 1)
    
    # TODO:
    # This is OpenCV's call to find all of the contours
    # Experiment with different algorithms (cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE) 
    # in the parameters of cv2.findContours!
    # <https://docs.opencv.org/2.4/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=findContours>
    # The underscores represent Python's method of unpacking return values, but not using them
    _, contours, _ = cv2.findContours(imgThresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)

    # Optional TODO:
    # Optional processing of contours - do we want to remove all non-rectangle shapes from contours?
    # Read the documentation on approxPolyDP <https://docs.opencv.org/2.4/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html>

    # TODO:
    # Orders contours - but by what metric? Check the "key" options <https://docs.opencv.org/3.1.0/dd/d49/tutorial_py_contour_features.html>
    # Ex. key = cv2.contourVectorLen() (Would order the contours by vector length - not an actual function, but this is how you would set the "key")
    # Python's "sorted" function applies a "key" set lambda function to each element within an array, this is not a traditional dictionary "key"
    contours = sorted(contours, key = cv2.contourArea, reverse = True)[1:]   # Removes contouring of display window

    if len(contours) != 0:
        # TODO:  
        # Draws the max of the contours arrays with respect to the "key" chosen above
        contours_max = max(contours, key = cv2.contourArea)

        # Find bounding box coordinates
        rect = cv2.boundingRect(contours_max)
        x, y, w, h = rect

        # TODO:
        # Calculates area of detection - what detection area should be the lower bound?
        if w*h > 28000:
            #cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 4)
            greenRect = ((x, y), (x+w, y+h))


    ################################################### Feature Detection
    # NOTE: NO CODE TO FILL IN HERE - BUT FEEL FREE TO CHANGE THE PARAMETERS

    kp_s, des_s = orb.detectAndCompute(frame, None)

    if(len(kp) >= 2 and len(kp_s) >= 2): 
        # Uses the FLANN algorithm to search for nearest neighbors between elements of two images
        # Faster than the BFMatcher for larger datasets
        matches = bf.knnMatch(des,des_s,k=2)
        #delete! matches = flann.knnMatch(des, des_s, k = 2)
    #except cv2.error:
        #pass

    if des_s is None and len(matches) == 0:
        pass

    # Store all the good matches (based off Lowe's ratio test)
    good = []
    for k, pair in enumerate(matches):
        try:
            (m, n) = pair
            if m.distance < 0.75 * n.distance:
                good.append(m)
        except ValueError:
            pass

    # When there are enough matches, we convert the keypoints to floats in order to draw them later
    if len(good) >= MIN_MATCH_COUNT:
        try:
            src_pts = np.float32([ kp[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
            dst_pts = np.float32([ kp_s[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
        except IndexError:
            pass

        # Homography adds a degree of rotation/translation invariance by mapping the transformation 
        # of points between two images
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()

        h, w = img1.shape
        pts = np.float32([ [0,0],[0,h-2],[w-2,h-2],[w-2,0] ]).reshape(-1,1,2)

        if M is not None:
            dst = cv2.perspectiveTransform(pts, M)
            intDst = np.int32(dst)

            # Draws a bounding box around the area of most matched points
            #cv2.rectangle(frame, (intDst[0][0][0], intDst[0][0][1]), (intDst[2][0][0], intDst[2][0][1]), (0, 0, 255), 4, cv2.LINE_AA, 0)
            redRect = ((intDst[0][0][0], intDst[0][0][1]), (intDst[2][0][0], intDst[2][0][1]))
            cv2.putText(frame, label, (dst[0][0][0], dst[0][0][1]) , cv2.FONT_HERSHEY_TRIPLEX, 1.0, (0, 0, 255), lineType = cv2.LINE_AA )

        else:
            matchesMask = None

    else:
        matchesMask = None

    draw_params = dict(matchColor = (0, 255, 0), # Draw matches in green color
                       singlePointColor = None,
                       matchesMask = matchesMask, # Draw only inliers
                       flags = 2)

    try:
        # Option of slicing the 'good' list to display a certain number of matches (ex. good[:6])
        # Take out draw_params if we do not want to draw matches
        frame = cv2.drawMatches(img1, kp, frame, kp_s, good, None, **draw_params) 
    except cv2.error:
        pass

    # Shows the current frame
    return(redRect, greenRect)
Example n. 10
def superm2(image):
    mimage = np.fliplr(image)
    kp1, des1 = sift.detectAndCompute(image, None)
    kp2, des2 = sift.detectAndCompute(mimage, None)
    for p, mp in zip(kp1, kp2):
        p.angle = np.deg2rad(p.angle)
        mp.angle = np.deg2rad(mp.angle)
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    houghr = np.zeros(len(matches))
    houghth = np.zeros(len(matches))
    weights = np.zeros(len(matches))
    i = 0
    good = []
    for match, match2 in matches:
        point = kp1[match.queryIdx]
        mirpoint = kp2[match.trainIdx]
        mirpoint2 = kp2[match2.trainIdx]
        mirpoint2.angle = np.pi - mirpoint2.angle
        mirpoint.angle = np.pi - mirpoint.angle
        if mirpoint.angle < 0.0:
            mirpoint.angle += 2 * np.pi
        if mirpoint2.angle < 0.0:
            mirpoint2.angle += 2 * np.pi
        mirpoint.pt = (mimage.shape[1] - mirpoint.pt[0], mirpoint.pt[1])
        if very_close(point.pt, mirpoint.pt):
            mirpoint = mirpoint2
            good.append(match2)
        else:
            good.append(match)
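        # Each (keypoint, mirrored match) pair votes for a candidate symmetry axis,
        # parameterized in Hough form as r = x*cos(theta) + y*sin(theta)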
        theta = angle_with_x_axis(point.pt, mirpoint.pt)
        xc, yc = midpoint(point.pt, mirpoint.pt)
        r = xc * np.cos(theta) + yc * np.sin(theta)
        Mij = reisfeld(point.angle, mirpoint.angle, theta) * S(
            point.size, mirpoint.size
        )
        houghr[i] = r
        houghth[i] = theta
        weights[i] = Mij
        i += 1
    # matches = sorted(matches, key = lambda x:x.distance)
    good = sorted(good, key=lambda x: x.distance)

    def draw(r, theta):
        if np.pi / 4 < theta < 3 * (np.pi / 4):
            for x in range(len(image.T)):
                y = int((r - x * np.cos(theta)) / np.sin(theta))
                if 0 <= y < len(image.T[x]):
                    image[y][x] = 255
        else:
            for y in range(len(image)):
                x = int((r - y * np.sin(theta)) / np.cos(theta))
                if 0 <= x < len(image[y]):
                    image[y][x] = 255
    print(houghr)
    print(houghth)
    img3 = cv2.drawMatches(image, kp1, mimage, kp2, good[:15], None, flags=2)

    def hex():
        polys = plt.hexbin(houghr, houghth, bins=200, gridsize=image.shape[1])
        # plt.colorbar()
        # plt.show()
        hvals = polys.get_array()
        hcoords = polys.get_offsets()
        return hcoords[hvals.argmax()]


    best_coords = hex()
    print(best_coords)
    # draw(2.8, 2.4)
    draw(*best_coords)
    cv2.imshow('a', image); cv2.waitKey(0);
Example n. 11
    def __init__(self, detector):
        self._detector = detector
        self._matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

        self._prevImg = None
Example n. 12
def wait_function(single_image_target, new_images_path, images_subject):
    try:
        percentage = 0.50
        geo_portail = []
        sift = cv2.xfeatures2d.SIFT_create()
        images_target_chunk = [single_image_target]

        for id_img, base_image in enumerate(images_subject):
            # print(f"image subject {id_img} - {images_subject[id_img]}")
            # cv2.imread(base_image)
            base_image_original = os.path.basename(base_image)
            # print(f"{str(threading.current_thread().name)}::IMAGE SUBJECT:: {base_image_original}")
            # print(f" ")
            base_image = cv2.imread(base_image, 0)
            # print(f"__Supouse to check {single_image_target} ")
            for idxInt, fileD in enumerate([single_image_target]):
                # print(f"____Checking... Image Target {fileD} ")
                # print(f"Image to processe {single_image_target} ")
                file_path = str(fileD)
                if os.path.exists(file_path):
                    # print(f"____file exist {True} ")
                    file_name = str(os.path.basename(fileD))
                    target_image_color = cv2.imread(file_path)
                    target_image = cv2.imread(file_path, 0)
                    kp1, des1 = sift.detectAndCompute(base_image, None)
                    kp2, des2 = sift.detectAndCompute(target_image, None)
                    bf = cv2.BFMatcher()
                    matches = bf.knnMatch(des1, des2, k=2)
                    good = []
                    percentage = 0.65 if base_image_original == 'logo.png' else percentage
                    percentage = 0.65 if base_image_original == 'logo_cadastre.png' else percentage

                    cont = 0
                    for match1, match2 in matches:
                        if match1.distance < percentage * match2.distance:
                            good.append([match1])
                            if cont == 0:
                                # print(f"____found match {True} " )
                                cont = 1
                            if base_image_original == "doble.png":
                                if file_path not in geo_portail and len(
                                        matches) >= 1400 and len(good) >= 70:
                                    # geo_portail.append(file_path)
                                    cv2.imwrite(
                                        os.path.join(new_images_path,
                                                     file_name),
                                        target_image_color)
                                    matched = True
                                    if idx > 0 and len(
                                            images_target_chunk) > 0:
                                        print(
                                            f"Saved: {base_image_original} => {file_name} current size chunk {len(images_target_chunk)} - {id_img}"
                                        )
                                        del images_target_chunk[idxInt]
                                        print(
                                            f"{base_image_original}: after delete: {len(images_target_chunk)} - {id_img}"
                                        )
                                        break

                            if base_image_original == "green.png":
                                if file_path not in geo_portail and len(
                                        matches) >= 2500 and len(good) >= 120:
                                    # geo_portail.append(file_path)
                                    cv2.imwrite(
                                        os.path.join(new_images_path,
                                                     file_name),
                                        target_image_color)
                                    matched = True
                                    if idx > 0 and len(
                                            images_target_chunk) > 0:
                                        print(
                                            f"Saved: {base_image_original} => {file_name} current size chunk {len(images_target_chunk)} - {id_img}"
                                        )
                                        del images_target_chunk[idxInt]
                                        print(
                                            f"{base_image_original}: after delete: {len(images_target_chunk)} - {id_img}"
                                        )
                                        break

                            if base_image_original == "icons.png":
                                if file_path not in geo_portail and len(
                                        matches) >= 120 and len(good) >= 45:
                                    # geo_portail.append(file_path)
                                    cv2.imwrite(
                                        os.path.join(new_images_path,
                                                     file_name),
                                        target_image_color)
                                    matched = True
                                    if idx > 0 and len(
                                            images_target_chunk) > 0:
                                        print(
                                            f"Saved: {base_image_original} => {file_name} current size chunk {len(images_target_chunk)} - {id_img}"
                                        )
                                        del images_target_chunk[idxInt]
                                        print(
                                            f"{base_image_original}: after delete: {len(images_target_chunk)} - {id_img}"
                                        )
                                        break

                            if base_image_original == "logo_cadastre.png":
                                if file_path not in geo_portail and len(
                                        matches) >= 150 and len(good) >= 27:
                                    # geo_portail.append(file_path)
                                    cv2.imwrite(
                                        os.path.join(new_images_path,
                                                     file_name),
                                        target_image_color)
                                    matched = True
                                    if idx > 0 and len(
                                            images_target_chunk) > 0:
                                        print(
                                            f"Saved: {base_image_original} => {file_name} current size chunk {len(images_target_chunk)} - {id_img}"
                                        )
                                        del images_target_chunk[idxInt]
                                        print(
                                            f"{base_image_original}: after delete: {len(images_target_chunk)} - {id_img}"
                                        )
                                        break

                            if base_image_original == "logo.png":
                                if file_path not in geo_portail and len(
                                        matches) > 200 and len(good) >= 75:
                                    # geo_portail.append(file_path)
                                    # print(f"Save here: {os.path.join(new_images_path, file_name)}")
                                    cv2.imwrite(
                                        os.path.join(new_images_path,
                                                     file_name),
                                        target_image_color)
                                    matched = True
                                    if idx > 0 and len(
                                            images_target_chunk) > 0:
                                        print(
                                            f"Saved: {base_image_original} => {file_name} current size chunk {len(images_target_chunk)} - {id_img}"
                                        )
                                        del images_target_chunk[idxInt]
                                        print(
                                            f"{base_image_original}: after delete: {len(images_target_chunk)} - {id_img}"
                                        )
                                        break

                            if len(good) >= 125:
                                print('::::::::break Good:::::::')
                                break
        #     print(f"{base_image_original}: acabo chunk : {len(images_target_chunk)} - {id_img}")
        #
        # print(f"{base_image_original}: ACABO SUBJECT : {len(images_target_chunk)} - {images_subject[id_img]}")
        # print(f"GEO PORTAILS {len(geo_portail)}")
        print('Done this Image')
        return True

    except ValueError as e:
        print('main: saw error "{}" when accessing result'.format(e))
        return False
Example n. 13
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

figure, ax = plt.subplots(1, 3, figsize=(16, 8))

ax[0].imshow(img1, cmap='gray')
ax[2].imshow(img2, cmap='gray')

#sift
sift = cv2.xfeatures2d.SIFT_create()

keypoints_1, descriptors_1 = sift.detectAndCompute(img1,None)
keypoints_2, descriptors_2 = sift.detectAndCompute(img2,None)

len(keypoints_1), len(keypoints_2)

#feature matching
bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=True)

matches = bf.match(descriptors_1,descriptors_2)
matches = sorted(matches, key = lambda x:x.distance)

img3 = cv2.drawMatches(img1, keypoints_1, img2, keypoints_2, matches[:50], img2, flags=2)
ax[1].imshow(img3)
plt.savefig(f"{path}result2.jpg")
plt.show()
Example n. 14
def make_diff(fpath, diff_path):

    image_list = os.listdir(fpath)
    if '.DS_Store' in image_list:
        image_list.remove('.DS_Store')
    neighbour_list = os.listdir(fpath)
    if '.DS_Store' in neighbour_list:
        neighbour_list.remove('.DS_Store')
    image_path = osp.join(fpath, 'NormalShot.jpg')
    neighbour_path = osp.join(fpath, 'WideShot.jpg')
    # Affine Transform
    img_ = cv2.imread(image_path)
    img_ = cv2.resize(img_, (512, 512))
    img1 = cv2.cvtColor(img_, cv2.COLOR_BGR2GRAY)
    img = cv2.imread(neighbour_path)
    img = cv2.resize(img, (512, 512))
    img2 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    mean = np.average(img1)
    mmax = np.amax(img1)
    mmin = np.amin(img1)
    variance = mmax - mmin
    # lb = mean - variance / 2
    # ub = mean + variance / 2

    is_align = False
    try:
        # sift = cv2.xfeatures2d.SIFT_create(contrastThreshold=0.03,edgeThreshold=10000)
        sift = cv2.xfeatures2d.SIFT_create()
        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(img1, None)
        kp2, des2 = sift.detectAndCompute(img2, None)

        # img3 = cv2.drawKeypoints(img1, kp1, img1)
        # cv2.imwrite(diff_path+'/sift_keypoints1.png', img3)
        # cv2.imwrite(diff_path+'/original1.png', img1)
        # img3 = cv2.drawKeypoints(img2, kp2, img2)
        # cv2.imwrite(diff_path+'/sift_keypoints2.png', img3)
        # cv2.imwrite(diff_path+'/original2.png', img2)
        #
        # stereo = cv2.StereoBM_create()
        # disparity = stereo.compute(img1, img2)
        # cv2.imwrite(diff_path+'/disparity.png', disparity)

        bf = cv2.BFMatcher()
        matches = bf.knnMatch(des1, des2, k=2)

        # Apply ratio test
        good = []
        for m in matches:
            if m[0].distance < 0.5 * m[1].distance:
                good.append(m)
        matches = np.asarray(good)

        if len(matches[:, 0]) < 4:
            raise AssertionError("Can’t find enough keypoints")

        src = np.float32([kp1[m.queryIdx].pt
                          for m in matches[:, 0]]).reshape(-1, 1, 2)
        dst = np.float32([kp2[m.trainIdx].pt
                          for m in matches[:, 0]]).reshape(-1, 1, 2)
        H, masked = cv2.findHomography(src, dst, cv2.RANSAC, 5)
        # save_path = "/Users/jason/Documents/GitHub/DAIN_py/tmp"
        # cv2.imwrite(save_path + '/view1.png', img_)
        # cv2.imwrite(save_path + '/view2.png', img)

        dst = cv2.warpPerspective(img1, H, (img_.shape[1], img_.shape[0]))
        # plt.imshow(img_)
        # plt.show()
        # plt.figure()
        # plt.imshow(img)
        # plt.show()
        # plt.figure()
        # plt.imshow(dst)
        # plt.show()
        # plt.figure()

        idx = (dst == 0)
        # diff =cv2.cvtColor(img-dst, cv2.COLOR_BGR2GRAY)
        diff = np.absolute(img2 - dst)
        diff[idx] = mean
        # diff = cv2.normalize(diff, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
        # print(diff)
        # diff = cv2.equalizeHist(diff)
        # diff = cv2.medianBlur(diff,3)
        # diff = cv2.blur(diff, (3,3))
        # diff = cv2.GaussianBlur(diff, (3,3),0)
        diff = cv2.bilateralFilter(diff, 9, 75, 75)
        # plt.imshow(diff)
        # plt.show()
        # plt.figure()

        # cv2.imwrite('/Users/jason/Documents/GitHub/DAIN_py/tmp/crop1.png', dst)
        # cv2.imwrite('/Users/jason/Documents/GitHub/DAIN_py/tmp/crop2.png', img)
        # cv2.imwrite(save_path+'/diff.png',diff)
        # diff = cv2.bilateralFilter(diff, 9, 75, 75)
        is_align = True
    except:
        # diff = cv2.cvtColor(img - img_, cv2.COLOR_BGR2GRAY)
        diff = np.absolute(img2 - img1)

    # diff = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    diff = cv2.normalize(diff, None, mmin, mmax, norm_type=cv2.NORM_MINMAX)
    diff = cv2.resize(diff, (240, 240))
    diff = np.stack((diff, ) * 3, axis=-1)
    cv2.imwrite(diff_path + '/diff.png', diff)

    return is_align
Example n. 15
def match(detector):
    #Reading the image pair
    img1 = cv2.imread('../Image_Pairs/torb_small1.png')
    print("Dimension of image 1:", img1.shape[0], "rows x", img1.shape[1],
          "columns")
    print("Type of image 1:", img1.dtype)
    img2 = cv2.imread('../Image_Pairs/torb_small2.png')
    print("Dimension of image 2:", img2.shape[0], "lignes x", img2.shape[1],
          "columns")
    print("Type of image 2:", img2.dtype)

    #Beginning the computation
    t1 = cv2.getTickCount()
    #Creation of objects "keypoints"
    if detector == 1:
        kp1 = cv2.ORB_create(
            nfeatures=500,  #By default : 500
            scaleFactor=1.2,  #By default : 1.2
            nlevels=8)  #By default : 8
        kp2 = cv2.ORB_create(nfeatures=500, scaleFactor=1.2, nlevels=8)
        print("Detector: ORB")
    else:
        kp1 = cv2.KAZE_create(
            upright=False,  #By default : false
            threshold=0.001,  #By default : 0.001
            nOctaves=4,  #By default : 4
            nOctaveLayers=4,  #By default : 4
            diffusivity=2)  #By default : 2
        kp2 = cv2.KAZE_create(upright=False,
                              threshold=0.001,
                              nOctaves=4,
                              nOctaveLayers=4,
                              diffusivity=2)
        print("Detector: KAZE")
    #Conversion to gray scale
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    #Detection and description of keypoints
    pts1, desc1 = kp1.detectAndCompute(gray1, None)
    pts2, desc2 = kp2.detectAndCompute(gray2, None)
    t2 = cv2.getTickCount()
    time = (t2 - t1) / cv2.getTickFrequency()
    print("Detection of points and computation of descriptors:", time, "s")
    # Beginning of matching
    t1 = cv2.getTickCount()
    if detector == 1:
        #Hamming distance for descriptor BRIEF (ORB)
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
    else:
        #L2 distance for descriptor M-SURF (KAZE)
        bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=False)
    # 2-nearest-neighbours list extraction
    matches = bf.knnMatch(desc1, desc2, k=2)
    # Application of the ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append([m])
    t2 = cv2.getTickCount()
    time = (t2 - t1) / cv2.getTickFrequency()
    print("Matching Computation:", time, "s")

    # Displaying the matches that respect the ratio test
    draw_params = dict(matchColor=(0, 255, 0),
                       singlePointColor=(255, 0, 0),
                       flags=0)
    img3 = cv2.drawMatchesKnn(gray1, pts1, gray2, pts2, good, None,
                              **draw_params)
    Nb_ok = len(good)
    return img3, Nb_ok
Example n. 16
def sift_bfma(im1, im2, lowe_ratio, min_match, display, ransac_th):
    import numpy as np
    import cv2 as cv
    from matplotlib import pyplot as plt
    from PIL import Image

    #Reading images
    img1 = cv.imread(im1, 1)
    img2 = cv.imread(im2, 1)
    #Creating the SIFT object - it uses the defaults but can also take
    #(nfeatures, nOctaveLayers, contrastThreshold, edgeThreshold, sigma)
    sift = cv.xfeatures2d.SIFT_create()
    #Exporting to grayscale
    gray1 = cv.cvtColor(img1, cv.COLOR_BGR2GRAY)
    gray2 = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)
    #Detecting keypoints and computing descriptors for every keypoint
    kp1, des1 = sift.detectAndCompute(gray1, None)
    kp2, des2 = sift.detectAndCompute(gray2, None)

    bf = cv.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)

    # store all the good matches as per Lowe's ratio test.
    if lowe_ratio == None:
        lowe_ratio = 0.75

    good = []
    for m, n in matches:
        if m.distance < lowe_ratio * n.distance:
            good.append(m)

    if min_match == None:
        MIN_MATCH_COUNT = 10
    else:
        MIN_MATCH_COUNT = min_match

    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt
                              for m in good]).reshape(-1, 1, 2)

        if ransac_th == None:
            ransac_th = 5.0

        M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, ransac_th)
        matchesMask = mask.ravel().tolist()

        h, w, d = img1.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv.perspectiveTransform(pts, M)

        img2 = cv.polylines(img2, [np.int32(dst)], True, 255, 3, cv.LINE_AA)
    else:
        print("Not enough matches are found - {}/{}".format(
            len(good), MIN_MATCH_COUNT))
        matchesMask = None

    draw_params = dict(
        matchColor=(0, 255, 0),  # draw matches in green color
        singlePointColor=None,
        matchesMask=matchesMask,  # draw only inliers
        flags=2)
    img3 = cv.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)
    if display != None:
        plt.imshow(cv.cvtColor(img3,
                               cv.COLOR_BGR2RGB)), plt.axis("off"), plt.show()
    return (img1, img2, img3, good)
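
# A hypothetical call to the function above; the file names below are placeholders,
# and passing None for the last two arguments falls back to the defaults handled inside:
img_a, img_b, img_matches, good_matches = sift_bfma('query.jpg', 'scene.jpg', 0.75, 10, None, None)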
Example n. 17
                              0, general_settings["camera_focal_length_y"],
                              general_settings["camera_center_x"]
                          ], [0, 0, 1]],
                         dtype="double")
# Coefficients are [k1, k2, p1, p2, k3] as per OpenCV documentation
distortion_coefficients = np.array([
    general_settings["camera_k1"], general_settings["camera_k2"],
    general_settings["camera_p1"], general_settings["camera_p2"],
    general_settings["camera_k3"]
])

#################################
# Detect features on the markers, compute pixel dimensions for coordinate calculation.
feature_detector = cv2.ORB_create()  #Initialise the ORB feature detector.
matcher = cv2.BFMatcher(
    cv2.NORM_HAMMING2, crossCheck=True
)  #Initialise the Brute Force matcher. Change the algorithm if you think it's reasonable to do so.

for i in range(0, general_settings["no_of_markers"]):
    #In this loop, for each marker as defined in the config file, we:
    # 1., Load the image
    markers[i]["image_data"] = cv2.imread(markers[i]["file_name"],
                                          cv2.IMREAD_COLOR)
    # 2., Calculate pixel width and height based on specified marker dimensions
    markers[i]["height_in_px"], markers[i]["width_in_px"], _ = markers[i][
        "image_data"].shape
    markers[i]["pixel_width"] = markers[i]["width"] / markers[i][
        "width_in_px"]  #Width of one pixel
    markers[i]["pixel_height"] = markers[i]["height"] / markers[i][
        "height_in_px"]  #Height of one pixel
    # 3., Identify features (key_points and descriptors)
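    # The snippet ends at step 3; a hedged completion of that step (the dictionary
    # keys "key_points" and "descriptors" are assumptions, not taken from the config):
    markers[i]["key_points"], markers[i]["descriptors"] = feature_detector.detectAndCompute(
        markers[i]["image_data"], None)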
Example n. 18
def main(opt):
    #region Initialization
    ARTools.log.setLevel(getattr(logging, opt.log.upper(), None))

    # descriptor
    if opt.detector == 'SIFT':
        detector = cv.SIFT_create()
        log.warning('Pipeline use SIFT detector (realtime is compromised)')
    elif opt.detector == 'ORB':
        detector = cv.ORB_create()
        log.info('Pipeline use ORB detector')
    else:
        detector = cv.AKAZE_create()
        log.info('Pipeline use AKAZE detector')

    # Matcher
    if opt.matcher == 'L2':
        matcher = cv.BFMatcher(normType=cv.NORM_L2)
        log.warning('Pipeline use BRUTEFORCE matcher')
    elif opt.matcher == 'FLANN':
        matcher = cv.FlannBasedMatcher()
        log.info('Pipeline use FLANNBASED matcher')
    else:
        matcher = cv.BFMatcher(normType=cv.NORM_HAMMING)
        log.info('Pipeline use BRUTEFORCE_HAMMING matcher')

    pipeline = ARPipeline(capture=opt.source,
                          width=opt.x,
                          height=opt.y,
                          calibration=opt.calibration,
                          detector=detector,
                          matcher=matcher)
    pipeline.loadMarker(opt.marker)
    model = OBJ(opt.object, swapyz=True)
    #endregion

    #region AR Pipeline
    while not Camera.checkKey():
        frame = pipeline.getFrame()
        if frame is not None:
            # Compute pose estimation
            matches, frame_kp = pipeline.computeMatches(frame)
            homography, _ = pipeline.computeHomography(
                matches, frame_kp, minMatches=opt.minMatches)
            matches_refined = pipeline.refineMatches(matches,
                                                     frame_kp,
                                                     minMatches=opt.minMatches)
            homography_refined, _ = pipeline.computeHomography(
                matches_refined, frame_kp, minMatches=opt.minMatches)
            warped, homography_warped = pipeline.warpMarker(
                frame, homography_refined, minMatches=opt.minMatches)
            rvecs, tvecs = pipeline.computePose(frame,
                                                homography_warped)  #@TODO pnp

            # Rendering
            ar = frame.copy()
            #ar=pipeline.renderer.draw2DRectangle(ar,homography,color=(255,0,0))
            #ar=pipeline.renderer.draw2DRectangle(ar,homography_refined,color=(0,255,0))
            #ar=pipeline.renderer.draw2DRectangle(ar,homography_warped,color=(0,0,255))
            #ar=pipeline.renderer.draw3DCube(ar,rvecs,tvecs)
            ar = pipeline.renderer.drawObj(ar,
                                           homography_warped,
                                           model,
                                           eye=0.4)
            cv.imshow('AR Camera (press "esc" to quit)', ar)

            if opt.all:
                cv.imshow('Keypoints',
                          pipeline.renderer.drawKeypoints(frame, frame_kp))
                img_matches = pipeline.renderer.drawMatches(
                    frame, frame_kp, matches, maxMatches=opt.maxMatches)
                img_matches = cv.resize(img_matches,
                                        (frame.shape[1], frame.shape[0]))
                cv.imshow('Matches', img_matches)
                img_matches_refined = pipeline.renderer.drawMatches(
                    frame,
                    frame_kp,
                    matches_refined,
                    maxMatches=opt.maxMatches)
                img_matches_refined = cv.resize(
                    img_matches_refined, (frame.shape[1], frame.shape[0]))
                cv.imshow('Matches refined', img_matches_refined)
                cv.imshow('Warp', warped)

                # Initialize position of window once
                if not opt.unmove:
                    opt.unmove = True
                    ypadding = 60
                    cv.moveWindow('AR Camera (press "esc" to quit)', 0, 0)
                    cv.moveWindow('Keypoints', frame.shape[1], 0)
                    cv.moveWindow('Matches', 2 * frame.shape[1], 0)
                    cv.moveWindow('Matches refined', 2 * frame.shape[1],
                                  frame.shape[0] + ypadding)
                    cv.moveWindow('Warp', frame.shape[1],
                                  frame.shape[0] + ypadding)
        else:
            # No frame available
            break
Example n. 19
 def __init__(self, nfeatures=500, draw=False):
     self.SIFT = cv2.xfeatures2d_SIFT.create(nfeatures=nfeatures)
     self.bf = cv2.BFMatcher()
     self.draw = draw
Example n. 20
kpts2, desc2 = sift.detectAndCompute(img2, None)
print(len(kpts1))
print(len(kpts2))
# cv2.imshow('img1', img1)
# cv2.imshow('img2', img2)
# cv2.waitKey(0)
# cv2.destroyAllWindows()

## second neighbour pre-filtering
nnThreshold = 0.8
time1 = clock()
tree = spatial.KDTree(desc1, leafsize=10)
time2 = clock()
print('time: ', time2 - time1)
query_desc = desc2
matcher = cv2.BFMatcher(cv2.NORM_L2)
time3 = clock()
raw_matches = matcher.knnMatch(desc2, trainDescriptors=desc1, k=2)
time4 = clock()
print('query time: ', time4 - time3)
print(len(raw_matches))
print(raw_matches[1][0].queryIdx)
print(raw_matches[1][0].trainIdx)
print(raw_matches[0][0].imgIdx)
print(raw_matches[1][0].imgIdx)
print(raw_matches[1][0].distance)
print(type(raw_matches))
good_match = []

for i in range(len(raw_matches)):
    if (raw_matches[i][0].distance / raw_matches[i][1].distance) <= float(
Example n. 21
def detect():
    if request.method == 'POST':
        f = request.files['file']
        f.save(os.path.join(app.config['UPLOAD_FOLDER'], "test.jpg"))

    notes = [
        "10.jpg", "10_new.jpg", "20.jpg", "20_new.jpg", "50.jpg", "50_new.jpg",
        "100.jpg", "100_new.jpg", "200.jpg", "500.jpg", "2000.jpg", "test.jpg"
    ]
    res = []

    for i in range(len(notes) - 1):
        img1 = cv2.imread("images//" + notes[i], 0)
        img2 = cv2.imread("images//" + notes[11], 0)
        img1 = resize_img(img1, 0.5)
        img2 = resize_img(img2, 0.2)
        # ORB Detector
        orb = cv2.ORB_create()
        kp1, des1 = orb.detectAndCompute(img1, None)
        kp2, des2 = orb.detectAndCompute(img2, None)
        #print(des1)
        # Brute Force Matching
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        matches = bf.match(des1, des2)
        #print(matches)
        print(len(matches))
        res.append(len(matches))
        matches = sorted(matches, key=lambda x: x.distance)
        matching_result = cv2.drawMatches(img1,
                                          kp1,
                                          img2,
                                          kp2,
                                          matches[:50],
                                          None,
                                          flags=2)

        #cv2.imshow("Test", img2)

        start_point = (0, 0)
        end_point = (0, 0)
        color = (0, 0, 0)
        thickness = 0
        image = cv2.rectangle(img2, start_point, end_point, color, thickness)
        #cv2.imshow("result", image)

        #cv2.imshow("Matching result", matching_result)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    val = res.index(max(res))
    print(val)
    deno = 0
    if (val == 0 or val == 1):
        deno = 10
    elif (val == 2 or val == 3):
        deno = 20
    elif (val == 4 or val == 5):
        deno = 50
    elif (val == 6 or val == 7):
        deno = 100
    elif (val == 8):
        deno = 200
    elif (val == 9):
        deno = 500
    else:
        deno = 2000

    font = cv2.FONT_HERSHEY_SIMPLEX
    org = (25, 25)
    fontScale = 1
    color = (255, 0, 0)
    thickness = 2
    image = cv2.putText(image,
                        'Currency Amount Detected is ' + str(deno) + ' INR',
                        org, font, fontScale, color, thickness, cv2.LINE_AA)
    flash('Currency Amount Detected is ' + str(deno) + ' INR')
    with open('config.json') as f:
        data = json.load(f)
    cnt = data['params']['count']
    data['params']['count'] = data['params']['count'] + 1
    with open('config.json', 'w') as f:
        json.dump(data, f)
    name = "res" + str(cnt) + ".jpg"
    cv2.imwrite("static//result//" + name, image)
    address = "static/result/res" + str(cnt) + ".jpg"
    print(address)
    return render_template('index.html', addr=address)
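# --- Added illustration (not part of the original snippet) ---
# The core idea of the detect() route above, isolated into a hypothetical
# helper: count cross-checked ORB matches between a reference note image and
# the uploaded image, so the note with the highest count can be reported.
def count_orb_matches(reference_gray, test_gray):
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(reference_gray, None)
    kp2, des2 = orb.detectAndCompute(test_gray, None)
    if des1 is None or des2 is None:
        return 0  # no features found in one of the images
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    return len(bf.match(des1, des2))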
Esempio n. 22
0
)  # ORB is a fast algorithm and it's free, unlike SIFT/SURF

kp1, des1 = orb.detectAndCompute(
    img1, None)  # kp1 holds the keypoints; None is the optional mask
kp2, des2 = orb.detectAndCompute(img2, None)

# descriptors details
# print(des1)
# print(des1.shape)     # (498, 32)
# print(des2.shape)     # (500, 32)

# The ORB detector uses 500 features by default, so it will try to find up to 500 keypoints in each image.
# Each feature is described by a 32-byte descriptor.

# Now that we have the descriptors, we can use a "matcher" to match these descriptors together.
bf = cv2.BFMatcher()  # brute force matcher
matches = bf.knnMatch(
    des1, des2, k=2)  # k=2 as we want two values that we can compare later.

# To decide whether it's a good match or not
good_matches = []
for m, n in matches:  # m, n are values from k=2
    if m.distance < 0.75 * n.distance:
        good_matches.append([m])

print(len(good_matches))

# Plotting the good matches
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good_matches, None, flags=2)

# To view the keypoints
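# --- Added continuation (assumed, not part of the original snippet, which is cut off here) ---
# A minimal way to display the keypoints and the good matches found above:
out1 = cv2.drawKeypoints(img1, kp1, None, color=(0, 255, 0))
out2 = cv2.drawKeypoints(img2, kp2, None, color=(0, 255, 0))
cv2.imshow('Keypoints 1', out1)
cv2.imshow('Keypoints 2', out2)
cv2.imshow('Good matches', img3)
cv2.waitKey(0)
cv2.destroyAllWindows()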
Esempio n. 23
0
 def __init__(self):
     self.sift = cv2.xfeatures2d.SIFT_create()
     self.brute = cv2.BFMatcher()
Esempio n. 24
0
def match(detector):

    #Reading the image pair
    img1 = cv2.imread('../Image_Pairs/torb_small1.png')
    print("Dimension of image 1:", img1.shape[0], "rows x", img1.shape[1],
          "columns")
    print("Type of image 1:", img1.dtype)
    img2 = cv2.imread('../Image_Pairs/torb_small2.png')
    print("Dimension of image 2:", img2.shape[0], "lignes x", img2.shape[1],
          "columns")
    print("Type of image 2:", img2.dtype)

    #Beginning the computation
    t1 = cv2.getTickCount()
    #Creation of the keypoint detector objects
    if detector == 1:
        kp1 = cv2.ORB_create(
            nfeatures=500,  #By default : 500
            scaleFactor=1.2,  #By default : 1.2
            nlevels=8)  #By default : 8
        kp2 = cv2.ORB_create(nfeatures=500, scaleFactor=1.2, nlevels=8)
        print("Detector: ORB")
    else:
        kp1 = cv2.KAZE_create(
            upright=False,  #By default : false
            threshold=0.001,  #By default : 0.001
            nOctaves=4,  #By default : 4
            nOctaveLayers=4,  #By default : 4
            diffusivity=2)  #By default : 2
        kp2 = cv2.KAZE_create(upright=False,
                              threshold=0.001,
                              nOctaves=4,
                              nOctaveLayers=4,
                              diffusivity=2)
        print("Detector: KAZE")
    #Conversion to gray scale
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    #Detection and description of keypoints
    pts1, desc1 = kp1.detectAndCompute(gray1, None)
    pts2, desc2 = kp2.detectAndCompute(gray2, None)
    #Un-matched points will appear in grey
    img1 = cv2.drawKeypoints(gray1, pts1, None, color=(127, 127, 127), flags=0)
    img2 = cv2.drawKeypoints(gray2, pts2, None, color=(127, 127, 127), flags=0)
    t2 = cv2.getTickCount()
    time = (t2 - t1) / cv2.getTickFrequency()
    print("Detection points and descriptors computation:", time, "s")
    # Beginning of Matching
    t1 = cv2.getTickCount()
    if detector == 1:
        #Hamming distance for descriptor BRIEF (ORB)
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    else:
        #L2 distance for descriptor M-SURF (KAZE)
        bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
    matches = bf.match(desc1, desc2)
    # Sorting the matches
    matches = sorted(matches, key=lambda x: x.distance)
    t2 = cv2.getTickCount()
    time = (t2 - t1) / cv2.getTickFrequency()
    print("Matching Computation:", time, "s")

    # Display the N best matches
    Nbest = 200
    img3 = cv2.drawMatches(img1,
                           pts1,
                           img2,
                           pts2,
                           matches[:Nbest],
                           None,
                           flags=2)
    return img3, Nbest
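# --- Added usage sketch (not part of the original snippet) ---
# match() takes 1 for ORB and any other value for KAZE; the call below is an
# assumed example of how the returned mosaic could be displayed.
if __name__ == '__main__':
    img_matches, n_best = match(1)  # ORB keypoints + Hamming brute-force matching with cross-check
    cv2.imshow('Best ' + str(n_best) + ' matches (press any key to quit)', img_matches)
    cv2.waitKey(0)
    cv2.destroyAllWindows()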
Esempio n. 25
0
            mask = cv.normalize(mask, mask, 0, 1, cv.NORM_MINMAX)
            #PATH = 'C:/Users/zszentim/Documents/ENGG6100Assign3/rutgers_apc_dataset/all_data/cheezit_big_original-pose-C-1-2-0.yml'

            img2 = img2 * mask
            img1 = cv.normalize(img1, img1, 0, 255, cv.NORM_MINMAX)
            img2 = cv.normalize(img2, img2, 0, 255, cv.NORM_MINMAX)

            # Initiate SIFT detector
            sift = cv.xfeatures2d.SIFT_create()

            # find the keypoints and descriptors with SIFT
            kp1, des1 = sift.detectAndCompute(img1, None)
            kp2, des2 = sift.detectAndCompute(img2, None)

            # BFMatcher with default params
            bf = cv.BFMatcher()
            matches = bf.knnMatch(des1, des2, k=2)
            newm = []
            # Apply ratio test
            good = []
            good_without_list = []
            for m, n in matches:
                if m.distance < 0.75 * n.distance:  #0.6 for cheezit but I should try a higher value, might get better results
                    good.append([m])
                    good_without_list.append(m)

            #cv.drawMatchesKnn expects list of lists as matches.
            img3 = cv.drawMatchesKnn(
                img1,
                kp1,
                img2,
Esempio n. 26
0
for i in corners_2:
    x, y = i.ravel()
    cv2.circle(img_zero_2, (x, y), 1, (255, 255, 255), 2)
    fe_2 = fe_2 + 1

cv2.imshow("src", img_zero_1)
cv2.imshow("dst", img_zero_2)

print(fe_1)
print(fe_2)

detector = cv2.AKAZE_create()
kpts1, desc1 = detector.detectAndCompute(img_zero_1, None)
kpts2, desc2 = detector.detectAndCompute(img_zero_2, None)
matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
matches = matcher.match(desc1, desc2)

h1 = img_zero_1.shape[0]
w1 = img_zero_1.shape[1]
h2 = img_zero_2.shape[0]
w2 = img_zero_2.shape[1]

img_dst = np.zeros((max(h1, h2), w1 + w2, 3), np.uint8)

cv2.drawMatches(img_src_1, kpts1, img_src_2, kpts2, matches, img_dst)
cv2.imwrite("output.jpg", img_dst)

cv2.imshow('out', img_dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
Esempio n. 27
0
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 19 13:26:59 2018

@author: dpr
"""
"""
will match an object contained in 2 different images without caring about orientation
"""
import numpy as np
import cv2
import matplotlib.pyplot as plt

img1 = cv2.imread('F:/temp/opencv-feature-matching-template.jpg', 0)
img2 = cv2.imread('F:/temp/opencv-feature-matching-image.jpg', 0)
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2)
matches = sorted(matches, key=lambda x: x.distance)
img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:10], None, flags=2)
plt.imshow(img3)
plt.show()
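# --- Added sketch (not part of the original snippet) ---
# With enough matches, the template could also be localised in the scene with a
# RANSAC homography; this assumes kp1, kp2, matches, img1 and img2 from above.
if len(matches) >= 4:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in matches[:50]]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches[:50]]).reshape(-1, 1, 2)
    H, inlier_mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    if H is not None:
        h, w = img1.shape
        corners = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        projected = cv2.perspectiveTransform(corners, H)
        located = cv2.polylines(img2.copy(), [np.int32(projected)], True, 255, 3, cv2.LINE_AA)
        plt.imshow(located, cmap='gray')
        plt.show()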
Esempio n. 28
0
def init_feature():
    detector = cv2.BRISK_create()
    norm = cv2.NORM_HAMMING
    matcher = cv2.BFMatcher(norm)
    return detector, matcher
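# --- Added usage sketch (not part of the original snippet) ---
# A minimal way the returned detector/matcher pair might be used; the image
# file names below are placeholders.
imgA = cv2.imread('imageA.png', cv2.IMREAD_GRAYSCALE)
imgB = cv2.imread('imageB.png', cv2.IMREAD_GRAYSCALE)
detector, matcher = init_feature()
kpA, descA = detector.detectAndCompute(imgA, None)
kpB, descB = detector.detectAndCompute(imgB, None)
raw = matcher.knnMatch(descA, descB, k=2)
good = [m for m, n in raw if m.distance < 0.75 * n.distance]  # Lowe's ratio test
print(len(good), 'matches passed the ratio test')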
Esempio n. 29
0
import numpy as np
import cv2

dataset = cv2.imread('dataset/imagemPare_11.jpg')

target = cv2.imread('target/pare.jpg')

datasetGray = cv2.cvtColor(dataset, cv2.COLOR_BGR2GRAY)
targetGray = cv2.cvtColor(target, cv2.COLOR_BGR2GRAY)

sift = cv2.xfeatures2d.SIFT_create()

kpDataset, desDataset = sift.detectAndCompute(datasetGray, None)
kpTarget, desTarget = sift.detectAndCompute(targetGray, None)

bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=False)

matches = bf.match(desDataset, desTarget)

matches = sorted(matches, key=lambda x: x.distance)

img3 = cv2.drawMatches(dataset,
                       kpDataset,
                       target,
                       kpTarget,
                       matches[:10],
                       None,
                       flags=2)
cv2.imshow('teste1', img3)

img3 = cv2.drawMatches(dataset,
Esempio n. 30
0
class board:
    mask = None
    board_field = None
    showflag = True
    exact = None
    moneys = []
    money_files = {"1b.png": "1 yuan back", "1f.png": "1 yuan front",
                   "10b.png": "10 yuan back", "10f.png": "10 yuan front",
                   "20b.png": "20 yuan back", "20f.png": "20 yuan front",
                   "50b.png": "50 yuan back", "50f.png": "50 yuan front",
                   "100b.png": "100 yuan back", "100f.png": "100 yuan front",
                   "5b.png": "5 yuan back", "5f.png": "5 yuan front",
                   "1jb.png":"1 jiao back","1jf.png": "1 jiao front",
                   "5jb.png": "5 jiao back", "5jf.png": "5 jiao front"
                   }
    detector = cv2.ORB(800)
    norm = cv2.NORM_HAMMING
    matcher = cv2.BFMatcher(norm)

    def __init__(self):
        self.cam = cv2.VideoCapture(0)
        self.cam.set(3, 1280)
        self.cam.set(4, 720)
        w = self.cam.get(3)
        h = self.cam.get(4)
        print w, h
        cv2.namedWindow("cam")
        cv2.setMouseCallback("cam", self.on_mouse)

        # img1 = cv2.bilateralFilter(img1, 11, 17, 17)  # bilateral filtering
        # img1 = cv2.Canny(img1, 30, 200)  # Canny edge detection
        for d in self.money_files:
            print d
            img = cv2.imread(d)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            kp, desc = self.detector.detectAndCompute(img, None)
            self.moneys.append((desc, self.money_files[d]))

    def on_mouse(self, event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            print (x, y)
            if x < 100 and y < 100:  # clicking the top-left corner of the window saves the current frame
                im = None
                if self.showflag:
                    im = self.img
                else:
                    im = self.hsv
                if im is not None:  # save the image to a PNG file
                    cv2.imwrite(datetime.now().strftime(
                        "%m%d%H%M%S") + ".png", im)  # timestamped filename
                    print "save png file to:\n", datetime.now().strftime("%m%d%H%M%S") + ".png"

    def rotate_about_center(self, src, angle, scale=1.):
        w = src.shape[1]
        h = src.shape[0]
        rangle = np.deg2rad(angle)  # angle in radians
        # now calculate new image width and height
        nw = (abs(np.sin(rangle) * h) + abs(np.cos(rangle) * w)) * scale
        nh = (abs(np.cos(rangle) * h) + abs(np.sin(rangle) * w)) * scale
        # ask OpenCV for the rotation matrix
        rot_mat = cv2.getRotationMatrix2D((nw * 0.5, nh * 0.5), angle, scale)
        # calculate the move from the old center to the new center combined
        # with the rotation
        rot_move = np.dot(rot_mat, np.array(
            [(nw - w) * 0.5, (nh - h) * 0.5, 0]))
        # the move only affects the translation, so update the translation
        # part of the transform
        rot_mat[0, 2] += rot_move[0]
        rot_mat[1, 2] += rot_move[1]
        return cv2.warpAffine(src, rot_mat, (int(math.ceil(nw)), int(math.ceil(nh))), flags=cv2.INTER_LANCZOS4)

    def find_contours(self, img):
        self.gray = img.copy()
        # img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        # img=cv2.bitwise_not(img)   # invert the image
        # img = cv2.GaussianBlur(img, (5, 5), 0)  # Gaussian blur
        img = cv2.bilateralFilter(img, 11, 17, 17)  # bilateral filtering
        # t,img=cv2.threshold(img, 0, 255, cv2.THRESH_OTSU )  # Otsu binarization
        img = cv2.Canny(img, 30, 200)  # Canny edge detection
        # img=cv2.dilate(img,cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))) # dilation
        # img=cv2.medianBlur(img,3) # median filtering
        # self.hsv=img.copy()         # keep a copy for display

        cnts, _ = cv2.findContours(
            img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:20]
        squares = []
        for cnt in cnts:
            cnt_len = cv2.arcLength(cnt, True)
            cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)  # polygonal approximation
            if len(cnt) == 4 and cv2.isContourConvex(cnt):  # convex quadrilateral
                squares.append(cnt)
                break
        if len(squares) > 0:
            screenCnt = squares[0]
            pts = screenCnt.reshape(4, 2)
            rect = np.zeros((4, 2), dtype="float32")

            # the top-left point has the smallest sum whereas the
            # bottom-right has the largest sum
            s = pts.sum(axis=1)
            rect[0] = pts[np.argmin(s)]
            rect[2] = pts[np.argmax(s)]

            # compute the difference between the points -- the top-right
            # will have the minimum difference and the bottom-left will
            # have the maximum difference
            diff = np.diff(pts, axis=1)
            rect[1] = pts[np.argmin(diff)]
            rect[3] = pts[np.argmax(diff)]

            (tl, tr, br, bl) = rect
            widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
            widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))

            # ...and now for the height of our new image
            heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
            heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))

            # take the maximum of the width and height values to reach
            # our final dimensions
            maxWidth = max(int(widthA), int(widthB))
            maxHeight = max(int(heightA), int(heightB))
            ratio = max(maxHeight, maxWidth) / 600.0
            maxWidth, maxHeight = int(maxWidth / ratio), int(maxHeight / ratio)
            dst = np.array([
                [0, 0],
                [maxWidth - 1, 0],
                [maxWidth - 1, maxHeight - 1],
                [0, maxHeight - 1]], dtype="float32")
            M = cv2.getPerspectiveTransform(rect, dst)
            self.exact = cv2.warpPerspective(
                self.img, M, (maxWidth, maxHeight))
            if maxWidth < maxHeight:
                self.exact = self.rotate_about_center(self.exact, 90)
            #self.exact = cv2.GaussianBlur(self.exact, (7, 7), 0)
            #self.exact = cv2.Canny(self.exact, 5, 100)
            self.recognition(self.exact)
        cv2.drawContours(self.img, squares, -1, (255, 128, 0), 3)

    def recognition(self, img):
        kp1, desc1 = self.detector.detectAndCompute(img, None)
        for md, mn in self.moneys:
            matches = self.matcher.knnMatch(desc1, md, k=2)
            good = []
            for m, n in matches:
                # print m.distance, n.distance, m.distance / n.distance
                # filter those pts similar to the next good ones
                if m.distance < 0.7 * n.distance:
                    good.append(m)
            if len(good) > 5:
                print mn,":",len(good)


    def run(self):  # main loop
        while True:
            ret, self.img = self.cam.read()  # grab a frame from the camera
            self.gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
            self.recognition(self.gray.copy())
            if self.showflag:  # toggle between display windows
                cv2.imshow("cam", self.img)
                cv2.setMouseCallback("cam", self.on_mouse)  # 重新注册鼠标事件
                if self.exact != None:
                    cv2.imshow("exact", self.exact)
            key = cv2.waitKey(20)
            if key == 27:
                break  # press Esc to quit
        self.cam.release()
        cv2.destroyAllWindows()