Example #1
def get_name(compare, pos):
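    # Map the stored position and the new position into the rectified grid,
    # then shift each character of the stored name by the per-cell offset.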
    old_ = cv.perspectiveTransform(
        np.array(compare["Pos"]).reshape(1, -1, 2).astype("float32"),
        trans_matrix)
    new_ = cv.perspectiveTransform(
        np.array(pos).reshape(1, -1, 2).astype("float32"), trans_matrix)
    diff = np.round((new_ - old_).reshape(2) / 100).astype("int")
    print(old_, new_, (new_ - old_), diff)
    name = np.add(diff, [ord(i) for i in compare["Name"]])
    name = "".join([chr(i) for i in name])
    return name
Example #2
def draw_object(img1, kp1, img2, kps2, matches):
    """
    Draws the object found on the second image

    :param img1: First image
    :param kp1: List of keypoints from the first image
    :param img2: Second image
    :param kps2: List of keypoints from the second image
    :param matches: List of matches between the two images
    :return: modified image
    """
    src_pts = np.float32([kp1[m.queryIdx].pt
                          for m in matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kps2[m.trainIdx].pt
                          for m in matches]).reshape(-1, 1, 2)

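    # Estimate the planar homography with RANSAC; [0] keeps the 3x3 matrix
    # and drops the inlier mask.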
    M = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)[0]

    h, w = img1.shape[:2]
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                      [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)
    img_res = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)

    return img_res
def orb_stitcher(imgs):
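    # Match ORB features between imgs[1] and imgs[0], then warp imgs[1] into
    # the master image's frame; returns the warped image (or None).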
    # find the keypoints with ORB
    orb1 = cv2.ORB_create(1000, 1.1, 13)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)

    kp_master, des_master = orb1.detectAndCompute(imgs[0], None)
    kp_secondary, des_secondary = orb1.detectAndCompute(imgs[1], None)

    matches = bf.match(des_secondary, des_master)
    # Sort them in the order of their distance.
    matches = sorted(matches, key=lambda x: x.distance)

    selected = []
    for m in matches:
        if m.distance < 40:
            selected.append(m)

    out_img = cv2.drawMatches(imgs[1], kp_secondary, imgs[0], kp_master,
                              selected, None)
    cv2.namedWindow('www', cv2.WINDOW_NORMAL)
    cv2.imshow('www', out_img)
    # cv2.imwrite('matches.jpg',out_img)
    cv2.waitKey(0)

    warped = None
    if len(selected) > 10:

        dst_pts = np.float32([kp_master[m.trainIdx].pt
                              for m in selected]).reshape(-1, 1, 2)
        src_pts = np.float32([kp_secondary[m.queryIdx].pt
                              for m in selected]).reshape(-1, 1, 2)

        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

        h, w = imgs[0].shape[0:2]
        pts = np.float32([[0, 0], [w, 0], [w, h], [0, h],
                          [0, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        max_extent = np.max(dst, axis=0)[0].astype(int)[::-1]  # np.int was removed from NumPy
        sz_out = (max(max_extent[1], imgs[0].shape[1]),
                  max(max_extent[0], imgs[0].shape[0]))

        # img2 = cv2.polylines(imgs[0], [np.int32(dst)], True, [0,255,0], 3, cv2.LINE_AA)

        cv2.namedWindow('w', cv2.WINDOW_NORMAL)

        warped = cv2.warpPerspective(imgs[1], M, dsize=sz_out)
        img_for_show = warped.copy()
        img_for_show[0:imgs[0].shape[0], 0:imgs[0].shape[1], 1] = imgs[0][:, :, 1]
        cv2.imshow('w', img_for_show)
        cv2.waitKey(0)
    return warped
Example #4
def outputLimits(hMat, imShape1, imShape2):
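    # Project image 2's corners through hMat and return the bounding box (in
    # image 1's frame) that covers both images.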

    h1, w1 = imShape1
    h2, w2 = imShape2
    pts1 = np.float32([[0, 0], [0, h1], [w1, h1], [w1, 0]]).reshape(-1, 1, 2)
    pts2 = np.float32([[0, 0], [0, h2], [w2, h2], [w2, 0]]).reshape(-1, 1, 2)
    pts2_ = cv2.perspectiveTransform(pts2, hMat)
    pts = np.concatenate((pts1, pts2_), axis=0)
    [xmin, ymin] = np.int32(pts.min(axis=0).ravel() - 0.5)
    [xmax, ymax] = np.int32(pts.max(axis=0).ravel() + 0.5)

    return xmin, xmax, ymin, ymax
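
# A minimal usage sketch for outputLimits (the names img1, img2 and hMat are
# assumptions, with hMat mapping img2's pixels into img1's frame): translate
# the mosaic into positive coordinates, then warp img2 and paste img1 on top.
def stitch_with_limits(img1, img2, hMat):
    xmin, xmax, ymin, ymax = outputLimits(hMat, img1.shape[:2], img2.shape[:2])
    # translation that moves (xmin, ymin) to the origin
    Ht = np.array([[1, 0, -xmin], [0, 1, -ymin], [0, 0, 1]], dtype=np.float64)
    pano = cv2.warpPerspective(img2, Ht.dot(hMat), (xmax - xmin, ymax - ymin))
    pano[-ymin:img1.shape[0] - ymin, -xmin:img1.shape[1] - xmin] = img1
    return pano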
def TransferAndSolvePoints(image_, data_):
    h, w = image_.shape[:2]
    pts_ = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                       [w - 1, 0]]).reshape(-1, 1, 2)
    dst_ = cv2.perspectiveTransform(pts_, data_)

    start_row = int(dst_[:, :, 1].min())
    start_col = int(dst_[:, :, 0].min())
    end_row = int(dst_[:, :, 1].max())
    end_col = int(dst_[:, :, 0].max())
    start_sec_col = int(dst_[:2, :, 0].max())

    return start_row, start_col, end_row, end_col, start_sec_col
Example #6
def draw_match(result_title, img1, img2, kp_pairs, status=None, H=None):
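    # Draw img1 and img2 side by side: green dots and lines mark inliers, red
    # crosses mark outliers, and H (if given) outlines img1 inside img2.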
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]

    # Create visualized result image
    vis = np.zeros((max(h1, h2), w1 + w2), np.uint8)
    vis[:h1, :w1] = img1
    vis[:h2, w1:w1 + w2] = img2
    vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)

    if H is not None:
        corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
        corners = np.int32(
            cv2.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(
                -1, 2) + (w1, 0))
        cv2.polylines(vis, [corners], True, (255, 255, 255))

    if status is None:
        status = np.ones(len(kp_pairs), np.bool_)
    p1, p2 = [], []  # build the point lists explicitly (zip unpacking differs between Python 2 and 3)

    for kpp in kp_pairs:
        p1.append(np.int32(kpp[0].pt))
        p2.append(np.int32(np.array(kpp[1].pt) + [w1, 0]))

    green = (0, 255, 0)
    red = (0, 0, 255)

    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            color = green
            cv2.circle(vis, (x1, y1), 2, color, -1)
            cv2.circle(vis, (x2, y2), 2, color, -1)
        else:
            color = red
            r = 2
            thickness = 3
            cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), color, thickness)
            cv2.line(vis, (x1 - r, y1 + r), (x1 + r, y1 - r), color, thickness)
            cv2.line(vis, (x2 - r, y2 - r), (x2 + r, y2 + r), color, thickness)
            cv2.line(vis, (x2 - r, y2 + r), (x2 + r, y2 - r), color, thickness)

    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            cv2.line(vis, (x1, y1), (x2, y2), green)

    cv2.imshow(result_title, vis)

    return vis
Example #7
def find_image_in_frame(dmatches, train_pts, new_pts, train_img_h,
                        train_img_w):
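    # Fit a RANSAC homography to the matches and return the training image's
    # corners projected into the new frame.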
    src_pts = np.float32([train_pts[m.queryIdx].pt
                          for m in dmatches]).reshape(-1, 1, 2)
    dst_pts = np.float32([new_pts[m.trainIdx].pt
                          for m in dmatches]).reshape(-1, 1, 2)

    homography_matrix, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,
                                              5.0)
    pts = np.float32([[0, 0], [0, train_img_h - 1],
                      [train_img_w - 1, train_img_h - 1],
                      [train_img_w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, homography_matrix)

    return dst
Example #8
def render(img, obj, projection, model):
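    # Project each face of the OBJ model onto the frame. projection is assumed
    # here to be the 3x4 matrix camera_parameters @ [R|t], in which case
    # cv2.perspectiveTransform maps the 3-channel points down to 2D pixels.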
    vertices = obj.vertices
    scale_matrix = np.eye(3) * 3
    h, w = model.shape

    for face in obj.faces:
        face_vertices = face[0]
        points = np.array([vertices[vertex - 1] for vertex in face_vertices])
        points = np.dot(points, scale_matrix)
        # render model in the middle of the reference surface. To do so,
        # model points must be displaced
        points = np.array([[p[0] + w / 2, p[1] + h / 2, p[2]] for p in points])
        dst = cv2.perspectiveTransform(points.reshape(-1, 1, 3), projection)
        imgpts = np.int32(dst)
        cv2.fillConvexPoly(img, imgpts, (137, 27, 211))
    return img
Example #9
def viewImage():
    img_ = right()
    # img_ = cv2.resize(img_, (0,0), fx=1, fy=1)
    img1 = cv2.cvtColor(img_, cv2.COLOR_BGR2GRAY)
    img = rear()

    # img = cv2.resize(img, (0,0), fx=1, fy=1)
    img2 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    sift = cv2.SIFT_create()  # in OpenCV < 4.4 this was cv2.xfeatures2d.SIFT_create()
    # find the key points and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    match = cv2.BFMatcher()
    matches = match.knnMatch(des1, des2, k=2)
    good = []
    for m, n in matches:
        if m.distance < 0.8 * n.distance:
            good.append(m)

    draw_params = dict(
        matchColor=(0, 255, 0),  # draw matches in green color
        singlePointColor=None,
        flags=2)
    img3 = cv2.drawMatches(img_, kp1, img, kp2, good, None, **draw_params)
    cv2.imshow("original_image_drawMatches.jpg", img3)

    MIN_MATCH_COUNT = 10
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        h, w = img1.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
        cv2.imshow("original_image_overlapping.jpg", img2)

    # dst = np.concatenate((rightImg, rearImg), 1)
    # cv2.imshow("Image", )
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #10
def get_candidates(screen):
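    # Rectify the screen, find the "Corner" anchors, and recover the phase
    # (dx, dy) of the 100 px grid; returns the cell centers in both grid
    # coordinates and screen coordinates.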
    warped = cv.warpPerspective(screen, trans_matrix, target_size)
    filtered_map = cv.filter2D(warped, 0, filter_kernel)
    #     show(res)
    _, poses = s.search_resource("Corner", image=filtered_map)
    if len(poses) < 4:
        raise ValueError("Less than 4 anchors found. Stop.")
    poses = np.array(poses)
    poses += s.resources["Corner"]["Offset"]
    diff = poses % 100
    dx = np.argmax(np.bincount(diff[:, 0]))
    dy = np.argmax(np.bincount(diff[:, 1]))

    res = itertools.product(range(dx, target_size[0], 100),
                            range(dy, target_size[1], 100))
    res = (np.array(list(res), dtype="float") + 50).reshape(1, -1, 2)

    pos_in_screen = cv.perspectiveTransform(res, inv_trans)
    pos_in_screen = pos_in_screen.reshape(-1, 2).astype("int")
    return res.reshape(-1, 2).astype("int"), pos_in_screen
Example #11
def render(img, obj, projection, model, color=False):
    vertices = obj.vertices
    scale_matrix = np.eye(3) * 0.5
    h, w, c = model.shape

    for face in obj.faces:
        face_vertices = face[0]
        points = np.array([vertices[vertex - 1] for vertex in face_vertices])
        points = np.dot(points, scale_matrix)
        # render model in the middle of the reference surface. To do so,
        # model points must be displaced
        points = np.array([[p[0] + w / 2, p[1] + h / 2, p[2]] for p in points])
        dst = cv2.perspectiveTransform(points.reshape(-1, 1, 3), projection)
        imgpts = np.int32(dst)
        #if color is False:
        cv2.fillConvexPoly(img, imgpts, (35, 240, 90))
        #else:
        #    color = hex_to_rgb(face[-1])
        #    color = color[::-1] # reverse
        #    cv2.fillConvexPoly(img, imgpts, color)

    return img
Example #12

good = []
for m, n in matches:
    # Lowe's ratio test; 0.75 is a typical threshold
    if m.distance < 0.75 * n.distance:
        good.append([m])

# img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good,None,flags = 2)
matchesMask = None
if len(good) > MIN_MATCH_COUNT:
    # collect the matched keypoint coordinates for the homography estimate
    src_pts = np.float32([kp1[m[0].queryIdx].pt
                          for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m[0].trainIdx].pt
                          for m in good]).reshape(-1, 1, 2)

    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()

    h, w = img1.shape
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                      [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)

    img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)

else:
    print "Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT)
    matchesMask = None

# img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,(0,255,0),None,matchesMask,2)
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None)
cv2.imshow('match knn', img3)

cv2.waitKey(0)
cv2.destroyAllWindows()
Example #13
def tracking_orb():
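    # Track marker.jpg through find_chocolate.mp4: per-frame ORB matching plus
    # a RANSAC homography, writing the annotated frames to a video file.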

    cap = cv2.VideoCapture('find_chocolate.mp4')

    ret, frm = cap.read()
    img_chocolate = cv2.imread('marker.jpg')

    frm_count = 0
    key = None

    # Set the video codec via its fourcc code; 'mp4v' pairs with the .mp4 container
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')

    # Setting up new video writer
    image_size = (frm.shape[1], frm.shape[0])
    # writer = cv2.VideoWriter('sample_tracking_orb.avi', fourcc, frames_per_second, image_size)
    out = cv2.VideoWriter('sample_tracking_orb.mp4', fourcc, 30.0, image_size)

    while ret:

        ## Create the ORB detector (this could be hoisted out of the loop)
        orb = cv2.ORB_create()

        gray2 = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)
        gray1 = cv2.cvtColor(img_chocolate, cv2.COLOR_BGR2GRAY)

        # gray2 = cv2.equalizeHist(gray2)
        # gray1 = cv2.equalizeHist(gray1)

        ## Find the keypoints and descriptors with ORB
        kpts1, descs1 = orb.detectAndCompute(gray1, None)
        kpts2, descs2 = orb.detectAndCompute(gray2, None)

        ## create BFMatcher object; ORB's default WTA_K=2 calls for
        ## NORM_HAMMING (NORM_HAMMING2 is only for WTA_K=3 or 4)
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

        # Match descriptors.
        matches = bf.match(descs1, descs2)

        # Sort them in the order of their distance.
        dmatches = sorted(matches, key=lambda x: x.distance)

        ## extract the matched keypoints
        src_pts = np.float32([kpts1[m.queryIdx].pt
                              for m in dmatches]).reshape(-1, 1, 2)
        dst_pts = np.float32([kpts2[m.trainIdx].pt
                              for m in dmatches]).reshape(-1, 1, 2)

        ## find homography matrix and do perspective transform
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        h, w = img_chocolate.shape[:2]
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)

        ## draw found regions
        frm = cv2.polylines(frm, [np.int32(dst)], True, (0, 0, 255), 1,
                            cv2.LINE_AA)

        ## draw match lines
        res = cv2.drawMatches(img_chocolate,
                              kpts1,
                              frm,
                              kpts2,
                              dmatches[:8],
                              None,
                              flags=2)

        # writer.write(res)
        cv2.namedWindow('orb_match', cv2.WINDOW_NORMAL)
        # cv2.imshow("orb_match", frm)
        out.write(frm)
        cv2.imshow("orb_match", res)

        # Pause on pressing of space.
        if key == ord(' '):
            wait_period = 0
        else:
            wait_period = 30

        key = cv2.waitKey(wait_period)
        ret, frm = cap.read()
        frm_count += 1

    cv2.destroyAllWindows()
    cap.release()
    out.release()

    return 0
Example #14
def tracking_lucas_kanade():
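    # Like tracking_orb, but additionally follows the matched points from
    # frame to frame with pyramidal Lucas-Kanade optical flow.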

    cap = cv2.VideoCapture('find_chocolate.mp4')

    img_chocolate = cv2.imread('marker.jpg')
    gray_chocolate = cv2.cvtColor(img_chocolate, cv2.COLOR_BGR2GRAY)

    # params for ShiTomasi corner detection (defined but not used below)
    feature_params = dict(maxCorners=1000,
                          qualityLevel=0.2,
                          minDistance=7,
                          blockSize=7)

    # Parameters for lucas kanade optical flow
    lk_params = dict(winSize=(35, 35),
                     maxLevel=4,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))

    # Create some random colors
    color = np.random.randint(0, 255, (1000, 3))

    # Take first frame and find corners in it

    ret, old_frame = cap.read()

    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)

    orb = cv2.ORB_create(1000, 1.1, 13)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
    kpts1, descs1 = orb.detectAndCompute(gray_chocolate, None)

    # Setting video format. Google for "fourcc"
    fourcc = cv2.VideoWriter_fourcc(*'XVID')

    # Setting up new video writer
    image_size = (old_frame.shape[1], old_frame.shape[0])
    # writer = cv2.VideoWriter('sample_tracking_orb.avi', fourcc, frames_per_second, image_size)
    out = cv2.VideoWriter('sample_tracking_lucas_kanade.avi', fourcc, 30.0,
                          image_size)

    frno = 0
    restart = False
    while True:
        frno += 1
        ret, frame = cap.read()
        if ret:

            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            if restart:
                # re-create the detector after tracking was lost
                orb = cv2.ORB_create(1000, 1.1, 13)
                restart = False

            kpts2, descs2 = orb.detectAndCompute(frame_gray, None)

            matches = bf.match(descs1, descs2)
            # Sort them in the order of their distance.
            dmatches = sorted(matches, key=lambda x: x.distance)

            ## extract the matched keypoints
            src_pts = np.float32([kpts1[m.queryIdx].pt
                                  for m in dmatches]).reshape(-1, 1, 2)
            dst_pts = np.float32([kpts2[m.trainIdx].pt
                                  for m in dmatches]).reshape(-1, 1, 2)

            ## find homography matrix and do perspective transform
            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
            h, w = img_chocolate.shape[:2]
            pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                              [w - 1, 0]]).reshape(-1, 1, 2)
            dst = cv2.perspectiveTransform(pts, M)

            ## draw found regions
            frm = cv2.polylines(frame, [np.int32(dst)], True, (0, 0, 255), 1,
                                cv2.LINE_AA)

            # ## draw match lines
            # res = cv2.drawMatches(img_chocolate, kpts1, frm, kpts2, dmatches[:8], None, flags=2)

            p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray,
                                                   dst_pts, None, **lk_params)
            successful = (st == 1)
            if np.sum(successful) == 0:
                restart = True
            # Select good points
            good_new = p1[successful]
            good_old = dst_pts[successful]

            # draw the tracks
            count_of_moved = 0
            for i, (new, old) in enumerate(zip(good_new, good_old)):
                a, b = new.ravel()
                c, d = old.ravel()
                velocity = np.sqrt((a - c)**2 + (b - d)**2)
                if velocity > 1:
                    # draw on the frame itself (point coordinates must be ints;
                    # the homography inlier mask is not an image to draw on)
                    frame = cv2.line(frame, (int(a), int(b)), (int(c), int(d)),
                                     color[i].tolist(), 2)
                    frame = cv2.circle(frame, (int(a), int(b)), 4,
                                       color[i].tolist(), -1)
                    count_of_moved += 1

            # res = cv2.drawMatches(img_chocolate, kpts1, frm, kpts2, dmatches, None, flags=2) #[:8]
            out.write(frame)

            cv2.namedWindow('orb_match', cv2.WINDOW_NORMAL)

            cv2.imshow('orb_match', frame)

            k = cv2.waitKey(30) & 0xff
            if k == 27:
                break

            # Now update the previous frame and previous points
            old_gray = frame_gray.copy()
            p0 = good_new.reshape(-1, 1, 2)

        else:
            break

    cv2.destroyAllWindows()
    cap.release()
    out.release()
Example #15
def main():
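    # Markerless AR loop: match ORB features between target_img.jpg and each
    # webcam frame, estimate a homography, and render wolf.obj on the target.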
    homography = None
    # matrix of camera parameters (made up but works quite well for me)
    camera_parameters = np.array([[800, 0, 320], [0, 800, 240], [0, 0, 1]])

    # create ORB - Oriented FAST and Rotated BRIEF - keypoint detector
    orb = cv2.ORB_create(nfeatures=1000)  # retain max 1000 features

    # create BFMatcher object; ORB's binary descriptors call for Hamming distance
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)

    # image target
    model = cv2.imread('target_img.jpg')

    # calculate keypoints and descriptors
    kp_model, des_model = orb.detectAndCompute(
        model, None)  # kp: keypoints, des: descriptors

    # obj file
    obj = OBJ('wolf.obj', swapyz=True)

    # Webcam
    webcam = cv2.VideoCapture(0)

    while True:
        success, imgwebcam = webcam.read()
        if not success:
            break
        # find and draw the keypoints of the frame
        kp_webcam, des_webcam = orb.detectAndCompute(imgwebcam, None)

        # finding match between 2 img
        matches = bf.knnMatch(des_model, des_webcam, k=2)
        # Taking good keypoints
        good = []
        for m, n in matches:
            if m.distance < 0.75 * n.distance:
                good.append(m)

        # compute Homography if enough matches are found
        if len(good) > 15:
            # separate the source points from the destination points
            srcpts = np.float32([kp_model[m.queryIdx].pt
                                 for m in good]).reshape(-1, 1, 2)
            dstpts = np.float32([kp_webcam[m.trainIdx].pt
                                 for m in good]).reshape(-1, 1, 2)

            # compute Homography
            homography, mask = cv2.findHomography(srcpts, dstpts, cv2.RANSAC,
                                                  5)

            #find boundary around model
            h, w, channel = model.shape
            pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                              [w - 1, 0]]).reshape(-1, 1, 2)
            # project corners into frame
            dst = cv2.perspectiveTransform(pts, homography)
            # connect them with lines
            #imgwebcam = cv2.polylines(imgwebcam,[np.int32(dst)], True, 255, 3, cv2.LINE_AA)

            # if a valid homography matrix was found render object on model plane
            if homography is not None:
                # obtain 3D projection matrix from homography matrix and camera parameters
                projection = projection_matrix(camera_parameters, homography)
                # render object
                imgwebcam = render(imgwebcam, obj, projection, model)
                #imgwebcam = render(imgwebcam, model, projection)

        cv2.imshow('result', imgwebcam)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    webcam.release()
    cv2.destroyAllWindows()
    return 0