Example #1
import cv2 as cv
import numpy as np

from bills.algo.image_utils import scale_down

NUM_KEYPOINTS = 2000
HOMOGRAPHY_METHOD = cv.RANSAC
RANSAC_REPROJ_THRESHOLD = 4.0

orb_detector = cv.ORB_create(NUM_KEYPOINTS, patchSize=41, edgeThreshold=55)
# create BFMatcher object
bf_matcher = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)


def prepare_image_for_keypoints(img):
    return scale_down(img, 550, 550)


def get_keypoints(front):
    # find the keypoints and descriptors with ORB
    kp1, des1 = orb_detector.detectAndCompute(front, mask=None)
    return kp1, des1


def match_and_score(a1, a2, debug=False):
    (kp1, des1) = a1
    (kp2, des2) = a2
    matches = bf_matcher.match(des1, des2)
    src_pts = np.float32([kp1[m.queryIdx].pt
                          for m in matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt
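The excerpt above is cut off inside the dst_pts expression. As a hedged sketch (not the original author's code), the body of match_and_score typically finishes by completing that expression, estimating a homography with the module constants defined above, and scoring the pair by its RANSAC inlier ratio:

    # Hypothetical continuation, assuming the function returns (homography, score):
    dst_pts = np.float32([kp2[m.trainIdx].pt
                          for m in matches]).reshape(-1, 1, 2)
    homography, inlier_mask = cv.findHomography(src_pts, dst_pts,
                                                HOMOGRAPHY_METHOD,
                                                RANSAC_REPROJ_THRESHOLD)
    # score = fraction of matches that survive RANSAC
    score = float(inlier_mask.sum()) / len(matches) if inlier_mask is not None else 0.0
    if debug:
        print(len(matches), score)
    return homography, score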
Example #2
def find_centroids(apriltag_query_path, apriltag_frame_path):
    apriltag_query = cv2.imread(apriltag_query_path,0)
    apriltag_frame = cv2.imread(apriltag_frame_path, 0)    

    #Task: Initiate ORB detector
    orb = cv2.ORB_create()

    #Task: Find the keypoints and descriptors with ORB
    kp1, des1 = orb.detectAndCompute(apriltag_query,None)
    kp2, des2 = orb.detectAndCompute(apriltag_frame,None)

    #Task:Create BFMatcher object
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

    #Task:Match descriptors.
    matches_ = bf.match(des1,des2)

    #Task:Sort them in the order of their distance.
    matches = sorted(matches_, key = lambda x:x.distance)
    good_matches = matches[:7]
    
    #Task:Get coordinates of all those matches(keypoints) in the frame
    list_kp1 = [kp1[mat.queryIdx].pt for mat in good_matches]
    temp_list_kp2 = [kp2[mat.trainIdx].pt for mat in good_matches]
    #print(temp_list_kp2)

    temp_dat = []
    for i in temp_list_kp2:
        temp_dat.append(apriltag_frame.shape[0]-i[1])

    list_kp2 = []
    for kp,temp in zip(temp_list_kp2,temp_dat):
        list_kp2.append((kp[0],temp))

    #Task:Make accessible lists for x and y coordinates
    x = np.array([int(item[0]) for item in list_kp2])
    y = np.array([int(item[1]) for item in list_kp2])
    X = np.array(list(zip(x,y))).reshape(len(x), 2)

    ###READ:UNSUPERVISED LEARNING METHOD
    #Task:Apply kmeans method to determine optimal k number of clusters in frame
    distortions = []
    K = range(1,len(X)+1)
    for k in K:
        kmeanModel = KMeans(n_clusters=k).fit(X)
        kmeanModel.fit(X)
        distortions.append(sum(np.min(cdist(X, kmeanModel.cluster_centers_, 'euclidean'), axis=1)) / X.shape[0])

    #Get list of all the points on elbow plot; Get points at min(k) and max(k)
    k_points = []
    for k,d in zip(K,distortions):
        k_points.append((k,d))
        point1 = k_points[0]
        point2 = k_points[-1]
    
    #Task:Use elbow to find out optimal k
    temp_lst = (point1,point2)
    #Subtask 1: Get all x values and y values
    x_vals = np.array([item[0] for item in temp_lst]).reshape(-1,1)
    y_vals = np.array([item[1] for item in temp_lst]).reshape(-1,1)
    #Subtask 2: Create linear model and fit
    linear_regressor = LinearRegression()
    linear_regressor.fit(x_vals,y_vals)
    #Subtask 3: Use model to predict all values of k
    y_pred = []
    for k in K: 
        y_pred.append(linear_regressor.intercept_ + linear_regressor.coef_ * k)
        
    temp_points = np.array(y_pred).ravel()

    y_pred_points = []
    for k,p in zip(K,temp_points):
        y_pred_points.append((k,p))

    #Subtask 3: Find distance between all the elbow points and the points on line
    distances = []
    for k_pts,y_pts in zip(k_points,y_pred_points):
        distances.append(distance(k_pts,y_pts))
        
    #Subtask 4: OptK is the associated k-value of the max distance
    opt_k = []
    for k,d in zip(K,distances):
        if d == max(distances):
            opt_k = k

    #Task:Use optimal k to create the clusters
    kmeans_model = KMeans(n_clusters=opt_k).fit(X)

    #Task:Find the centroids of those clusters
    centers = np.array(kmeans_model.cluster_centers_)
    print(centers)
    
    return centers
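The function above relies on imports and a distance helper that the excerpt doesn't show. One plausible set of assumptions (scikit-learn for KMeans and LinearRegression, SciPy for cdist and a Euclidean distance helper) would be:

# Hypothetical imports for the snippet above; the original source may differ.
import cv2
import numpy as np
from scipy.spatial.distance import cdist, euclidean as distance
from sklearn.cluster import KMeans
from sklearn.linear_model import LinearRegression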
Example #3
import cv2
from matplotlib import pyplot as plt

img1 = cv2.imread('human.jpg', cv2.IMREAD_COLOR)  # read image in color
img1_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)  # convert image from BGR to grayscale
img2 = cv2.imread('human_rotate.jpg', cv2.IMREAD_COLOR)  # read image in color
img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)  # convert image from BGR to grayscale

orb = cv2.ORB_create()  # create an ORB object
kp1, des1 = orb.detectAndCompute(img1_gray, None)  # detect keypoints and compute descriptors on image
kp2, des2 = orb.detectAndCompute(img2_gray, None)  # detect keypoints and compute descriptors on image

# ===> Create a brute-force matcher
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)  # create a BFMatcher
matches = bf.match(des1, des2)  # match the descriptors together
matches = sorted(matches, key=lambda x: x.distance)  # sort the matches from smallest to largest distance

res = cv2.drawMatches(img1, kp1, img2, kp2, matches[:20], None)  # draw the 20 best matches

cv2.namedWindow('BFMatcher in orb', cv2.WINDOW_NORMAL)  # create a named window
cv2.imshow('BFMatcher in orb', res)  # show the image

if cv2.waitKey(0) & 0xFF == ord('q'):
    cv2.destroyWindow('BFMatcher in orb')  # destroy the window


Example #4
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
imgTarget = cv2.imread('TargetImage.jpg')
myVid = cv2.VideoCapture('Chicago_360p.mp4')

hT, wT, cT = imgTarget.shape
success, imgVideo = myVid.read()
imgVideo = cv2.resize(imgVideo, (wT, hT))

orb = cv2.ORB_create(nfeatures=1000)
kp1, des1 = orb.detectAndCompute(imgTarget, None)
# imgTarget = cv2.drawKeypoints(imgTarget , kp1 , None)

while True:
    success, imgWebcam = cap.read()
    imgWebcam = cv2.flip(imgWebcam, 1)

    kp2, des2 = orb.detectAndCompute(imgWebcam, None)
    # imgWebcam = cv2.drawKeypoints(imgWebcam , kp2 , None)

    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    good = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good.append(m)
    print(len(good))
    imgFeatures = cv2.drawMatches(imgTarget,
                                  kp1,
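The example breaks off inside the drawMatches call. A hedged sketch of how this kind of AR loop is often completed (finish the visualization, estimate a homography from the good matches, and outline the target in the webcam frame) could look like the following; none of it is taken from the original:

    # Hypothetical continuation of the loop body (not from the original source).
    imgFeatures = cv2.drawMatches(imgTarget, kp1, imgWebcam, kp2, good, None, flags=2)
    if len(good) > 20:
        srcPts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dstPts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        matrix, mask = cv2.findHomography(srcPts, dstPts, cv2.RANSAC, 5.0)
        if matrix is not None:
            # project the target corners into the webcam frame and outline the detection
            pts = np.float32([[0, 0], [0, hT], [wT, hT], [wT, 0]]).reshape(-1, 1, 2)
            dst = cv2.perspectiveTransform(pts, matrix)
            imgWebcam = cv2.polylines(imgWebcam, [np.int32(dst)], True, (255, 0, 255), 3)
    cv2.imshow('Features', imgFeatures)
    cv2.imshow('Webcam', imgWebcam)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break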
Example #5
"""Dececting key points in images"""

import numpy
import cv2
from matplotlib import pyplot as plt
"""Load Image"""
img = cv2.imread("focusedMarchantia1.jpg", cv2.IMREAD_COLOR)
"""Initialise detector"""
orb = cv2.ORB_create()
"""find keypoints"""
kp = orb.detect(img, None)
"""Compute descriptors with orb"""
kp, des = orb.compute(img, kp)
"""Draw only keypoints location, not size or orientation"""
img2 = img.copy()
cv2.drawKeypoints(img, kp, img2, color=(255, 0, 0), flags=0)
plt.imshow(img2), plt.show()

#=============================================================================================================================#
"""Matching up key points in two images"""

import numpy
import cv2
from matplotlib import pyplot as plt
"""Load Images"""
img1 = cv2.imread("small crop.jpg", cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread("focusedMarchantia1.jpg", cv2.IMREAD_GRAYSCALE)
"""Initialise detector"""
orb = cv2.ORB_create()
"""find keypointsand compute"""
kp1, des1 = orb.detectAndCompute(img1, None)
Example #6
def main(child_conn):

    # play the background sound
    mixer.music.load('sounds/background.mp3')
    mixer.music.play(-1)

    # Get the data from the XML
    solarSystem = ET.parse('planet_info.xml')
    celestialBodies = solarSystem.getroot()

    # Parse everything from XML into global variables
    for cBodies in celestialBodies:
        planets = cBodies.findall("planet")
        stars = cBodies.findall("star")
        if planets:
            for planet in planets:
                planet_list.append(
                    Planet(planet[0].text, planet[1].text, planet[2].text, planet[3].text, planet[4].text,
                           planet[5].text, planet[6].text, planet[7].text, planet[8].text))
        elif stars:  # since there is only one star (sun), we just add it into the list of planets
            for star in stars:
                planet_list.append(
                    Planet(star[0].text, star[1].text, star[2].text, star[3].text, star[4].text, star[5].text,
                           star[6].text, star[7].text, star[8].text))
        else:
            print("Nothing was read from the XML.")

    # Set up the camera
    cam, camera_height, camera_width = init_webcam()
    CAM_WIDTH = camera_width
    CAM_HEIGHT = camera_height

    # Load the image that is going to be projected
    projectionImage = cv2.imread(imageToBeProjected)
    shuttleIcon = cv2.imread(shuttleToBeDrawn, cv2.IMREAD_UNCHANGED) # read with the alpha channel

    # Draw markers on the image
    for marker_index, cp in enumerate(marker_points):
        marker_image = cv2.imread(marker_file_name[marker_index])
        h, w, d = marker_image.shape
        projectionImage[cp[1]:cp[1] + h, cp[0]:cp[0] + w] = marker_image.copy()
    h, w, d = projectionImage.shape

    # Create an opencv window to display the projection onto
    cv2.namedWindow("Projector", cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty("Projector", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    cv2.imshow('Projector', projectionImage)
    cv2.namedWindow("Debug", cv2.WINDOW_NORMAL)

    while True:

        # work with a copy
        processedImage = projectionImage.copy()

        # Find keypoints in the projected image
        orb2 = cv2.ORB_create(nfeatures=500)
        projectionImage_kp = orb2.detect(processedImage, None)
        projectionImage_kp, projectionImage_des = orb2.compute(processedImage, projectionImage_kp)

        # Get an image from the camera
        ret_val, cameraImage = cam.read()

        # Get the matching features in the camera image using the descriptors from the projection image
        matches, cameraImage_kp = get_feature_matches(projectionImage_des, cameraImage)

        # if we can't find any matches, just keep displaying the image and inform the user
        if len(matches) <= MIN_MATCH_COUNT:
            cv2.imshow('Projector', processedImage)
            print('Could not find matches.')
            cv2.waitKey(10)
            continue

        # Now compute the Homography
        homographyMatrix, matchesMask = get_homography(matches, projectionImage_kp, cameraImage_kp)

        # Visualize!
        #show_matches(processedImage, cameraImage, projectionImage_kp, cameraImage_kp, homographyMatrix)

        # Get a virtual point on the image
        # Check first if the image is fully visible
        # Update the position to the new found position, otherwise keep the old one
        if isImageFullyVisible(homographyMatrix, projectedImageWidth, projectedImageHeight):
            smoothenMatrix(homographyMatrix)
            virtualPoint = virtual_point(smoothenedMatrix)
            updatedPoint = virtualPoint[0][0]
        else:
            updatedPoint = [p for p in trackedCenterPoint]

        # we don't want scattering or abrupt weird moves, so smoothen the motion
        smoothenCenterMotion(updatedPoint, DELTA_T)

        # convert tuple coordinates to list coordinates
        coordinates = [updatedPoint[0], updatedPoint[1]]

        # send to blender
        child_conn.send(coordinates)
        child_conn.close()

        # get from the blender, wait for an answer
        #while not x.recv():
        #    planetname = x.recv(4096)

        print(planetname)

        # loop over the objects of planets
        for planet in planet_list:
            if planet.name == planetname:  # find the one that matches the one we landed
                # prepare the information of the planet we land on
                info = prepare_info(planet)
                # get the font
                fontsize = 20
                font = ImageFont.truetype("spacefont.ttf", fontsize)
                # load the image to PIL format
                img_pil = Image.fromarray(processedImage)
                # draw the text
                draw = ImageDraw.Draw(img_pil)
                draw.text((DISPLAY_INFO_LOCATION_X, DISPLAY_INFO_LOCATION_Y), info, font=font, fill=(0, 255, 255, 0))  # color BGR

                # back to opencv format
                processedImage = np.array(img_pil)
                break

        # rotate the shuttle as the camera does
        # first though, get a copy
        toBeRotatedShuttle = shuttleIcon.copy()
        rows, cols, w = toBeRotatedShuttle.shape
        angle = get_camera_rotation(smoothenedMatrix)
        angleInDegrees = round(math.degrees(clean_asin(angle)), 2)  # convert radian to degrees
        rotationMatrix = cv2.getRotationMatrix2D((cols / 2, rows / 2), angleInDegrees, 1)
        toBeRotatedShuttle = cv2.warpAffine(toBeRotatedShuttle, rotationMatrix, (cols, rows), cv2.INTER_LANCZOS4)

        # Overlay transparent images at desired position(x,y) and scale.
        result = transparentOverlay(processedImage, toBeRotatedShuttle, tuple(trackedCenterPoint), 0.7)

        # Display the resulting projector image with a dot for the camera location
        cv2.imshow('Projector', processedImage)
        if cv2.waitKey(20) == ord('a'):
            break

    # When everything done, release the capture
    cam.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":

    # execute only if run as a script
    main(child_conn)
Example #7
    new_img.paste(re_img)
    new_img.save(NEW_IMG_FILE)

    # Create feature descriptors
    # print("Create feature descriptors")
    # print(NEW_IMG_FILE)
    img = cv.imread(NEW_IMG_FILE)
    # print(new_img.size)
    # print(NEW_IMG_FILE)

    # View Image
    # cv.imshow('image', img)
    # cv.waitKey(0)
    # cv.destroyAllWindows()

    orb = cv.ORB_create()
    kp = orb.detect(img, None)
    kp, des = orb.compute(img, kp)
    # print(kp,des)
    # View KeyPoints on Image
    # https://github.com/skvark/opencv-python/issues/168
    # print("View keypoints on new image")
    # img2 = img.copy()
    # for marker in kp:
    #     img2 = cv.drawMarker(img2, tuple(int(i) for i in marker.pt), color=(0, 255, 0))
    # plt.imshow(img2), plt.show()
    #
    # [print(point.pt, point.size, point.angle) for point in kp]

    # print(des)
    # print(kp)
Example #8
def FindTransform(ima, imb, fd_type=None):
    """
    ima(DataArray of shape YaXa with uint8): Image to be aligned
    imb(DataArray of shape YbXb with uint8): Base image
        Note that its shape doesn't need to bear any relationship to the shape of the
        first image (it doesn't even need to have the same aspect ratio)
    fd_type(None or str): Feature detector type. Must be 'SIFT' or 'ORB'. ORB is faster,
        but SIFT usually has better results. If None, it will pick the best available.
    return (ndarray of shape 3, 3): transformation matrix to align the first image on the
        base image. (right column is translation)
    raises:
    ValueError: if no good transformation is found.
    """

    # Instantiate the feature detector and the matcher
    # TODO: try BRISK, AKAZE and other detectors?
    if fd_type is None:
        for fd in ("SIFT", "ORB"):
            if hasattr(cv2, "%s_create" % fd):
                fd_type = fd
                break

    if fd_type == "ORB":
        feature_detector = cv2.ORB_create()
        if USE_BF:
            matcher = cv2.BFMatcher(normType=cv2.NORM_HAMMING)
        else:
            index_params = dict(
                algorithm=FLANN_INDEX_LSH,
                table_number=6,  # 12
                key_size=12,  # 20
                multi_probe_level=1)  # 2
            search_params = {}
            matcher = cv2.FlannBasedMatcher(index_params, search_params)
    elif fd_type == "SIFT":
        # Extra arguments for SIFT
        #         contrastThreshold = 0.04
        #         edgeThreshold = 10
        #         sigma = 1.6  # TODO: no need for Gaussian as preprocess already does it?
        feature_detector = cv2.SIFT_create(
            nfeatures=2000)  # avoid going crazy on keypoints
        if USE_BF:
            matcher = cv2.BFMatcher(normType=cv2.NORM_L2)
        else:
            # Note: with KDTree, every call returns slightly different matches,
            # which is quite annoying for reproducibility
            #             index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
            index_params = dict(algorithm=FLANN_INDEX_KMEANS)
            search_params = dict(checks=32)  # default value
            matcher = cv2.FlannBasedMatcher(index_params, search_params)
    else:
        raise ValueError("Unknown feature detector %s" % (fd_type, ))

    logging.debug("Using feature detector %s", fd_type)

    # find and compute the descriptors
    ima_kp, ima_des = feature_detector.detectAndCompute(ima, None)
    imb_kp, imb_des = feature_detector.detectAndCompute(imb, None)
    logging.debug("Found %d and %d keypoints", len(ima_kp), len(imb_kp))

    # run the matcher of the detected features
    if USE_KNN:
        # For each keypoint, return up to k(=2) best ones in the other image
        matches = matcher.knnMatch(ima_des, imb_des, k=2)

        # store all the good matches as per Lowe's ratio test
        dist_ratio = 0.75
        selected_matches = [
            m[0] for m in matches
            if len(m) == 2 and m[0].distance < m[1].distance * dist_ratio
        ]
    else:
        # For each keypoint, pick the closest one in the other image
        matches = matcher.match(ima_des, imb_des)

        # Pick up to the best 10 matches
        min_dist = 100  # almost random value
        selected_matches = [m for m in matches if m.distance < min_dist]
        selected_matches.sort(key=lambda m: m.distance)
        selected_matches = selected_matches[:10]

    logging.debug("Found %d matches and %d good ones", len(matches),
                  len(selected_matches))
    if len(selected_matches) < 5:
        raise ValueError(
            "Less than 5 common features (%d) detected on the images" %
            (len(selected_matches), ))

    # get keypoints for selected matches
    selected_ima_kp = [list(ima_kp[m.queryIdx].pt) for m in selected_matches]
    selected_imb_kp = [list(imb_kp[m.trainIdx].pt) for m in selected_matches]
    selected_ima_kp = numpy.array([selected_ima_kp])
    selected_imb_kp = numpy.array([selected_imb_kp])

    ima_mkp = [ima_kp[m.queryIdx] for m in selected_matches]
    imb_mkp = [imb_kp[m.trainIdx] for m in selected_matches]

    # testing detecting the matching points automatically
    try:
        mat, mask = cv2.findHomography(selected_ima_kp, selected_imb_kp,
                                       cv2.RANSAC)
    except Exception:
        raise ValueError("The images does not match")

    if mat is None:
        raise ValueError("The images does not match")

    return mat, ima_kp, imb_kp, ima_mkp, imb_mkp
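As a hedged usage sketch (not part of the original module), the returned matrix can be applied with cv2.warpPerspective to resample ima onto imb's coordinate frame:

# Hypothetical usage of FindTransform; the input images are placeholders.
mat, ima_kp, imb_kp, ima_mkp, imb_mkp = FindTransform(ima, imb, fd_type="ORB")
h, w = imb.shape[:2]
aligned = cv2.warpPerspective(ima, mat, (w, h))  # ima aligned onto imb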
Example #9
 def __init__(self):
     self.orb = cv2.ORB_create(1000)
Example #10
def main():
    # Setup Log
    logging.basicConfig(filename='Stitch_image.log',
                        level=logging.DEBUG,
                        format='%(asctime)s - %(levelname)s - %(message)s',
                        filemode='w')

    # Setup argument parser
    ap = argparse.ArgumentParser()
    ap.add_argument("-d", "--directory", required=True, help="image directory")

    args = vars(ap.parse_args())

    # Save Parameters
    input_directory = args['directory']

    # Print input parameters to log
    logging.info('Input Parameters:\n'
                 '\tDirectory: {}\n'.format(input_directory))

    # Create the output image folder if it doesn't exist
    output_image_folder = os.path.join(
        os.path.dirname(input_directory),
        'output_images/{}'.format(args['directory']))
    if not os.path.exists(output_image_folder):
        os.mkdir(output_image_folder)

    ######
    image_directory = os.path.join(definitions.ROOT_DIR, input_directory)
    image_RGB_list = []
    image_GRAY_list = []
    for image_file_name in sorted(os.listdir(image_directory), reverse=True):
        logging.info('Processing image {}'.format(image_file_name))
        image_path = os.path.join(image_directory, image_file_name)
        gray_image = cv2.imread(image_path, 0)
        image_GRAY_list.append(gray_image)
        rgb_image = cv2.imread(image_path)
        image_RGB_list.append(rgb_image)

    # Test 2 images
    orb = cv2.ORB_create()
    # create BFMatcher object
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    H_list = []
    mosaic_image = [image_RGB_list[0]]
    offset_list = []
    try:
        for i in range(len(image_GRAY_list) - 1):
            # find the keypoints and descriptors with ORB
            train_GRAY_image = image_GRAY_list[i]
            train_BGR_image = equalize_image(image_RGB_list[i])
            query_GRAY_image = image_GRAY_list[i + 1]
            query_BGR_image = equalize_image(image_RGB_list[i + 1])
            kp_train, des_train = orb.detectAndCompute(train_GRAY_image, None)
            kp_query, des_query = orb.detectAndCompute(query_GRAY_image, None)

            # Match descriptors.
            matches = bf.match(des_query, des_train)

            # Sort them in the order of their distance.
            matches = sorted(matches, key=lambda x: x.distance)

            # Compute H Matrix implementing RANSAC algorithm
            H = compute_H_from_RANSAC(kp_query, kp_train, matches)
            if not H_list:
                pass
            else:
                H = np.matmul(H_list[i], H)
            H_list.append(H)

            # Compute size of mosaic
            mosaic_x_range, mosaic_y_range, train_image_x_offset, train_image_y_offset, x,y \
                = compute_size_of_mosaic(mosaic_image[i], query_GRAY_image, H_list[i])
            train_image_x_offset = int(math.ceil(train_image_x_offset))
            train_image_y_offset = int(math.ceil(train_image_y_offset))

            mosaic_x_range = int(math.ceil(np.shape(mosaic_image[i])[1]))
            mosaic_y_range = int(math.ceil(np.shape(mosaic_image[i])[0]))

            im_out = cv2.warpPerspective(
                query_BGR_image, H_list[i],
                (math.ceil(mosaic_x_range), math.ceil(mosaic_y_range)))

            im_out[train_image_y_offset:y + train_image_y_offset,
                   train_image_x_offset:x +
                   train_image_x_offset] = train_BGR_image

            offset_list.append([train_image_y_offset, train_image_x_offset])

            save_stiched_image(output_image_folder, im_out, i)

            mosaic_image.append(im_out)
    except:
        pass
Example #11
roi = [[(98, 984), (680, 1074), 'text', 'Name'],
       [(740, 980), (1320, 1078), 'text', 'Phone'],
       [(98, 1154), (150, 1200), 'box', 'Sign'],
       [(738, 1152), (790, 1200), 'box', 'Allergic'],
       [(100, 1418), (686, 1518), 'text', 'Email'],
       [(740, 1416), (1318, 1512), 'text', 'ID'],
       [(110, 1598), (676, 1680), 'text', 'City'],
       [(748, 1592), (1328, 1686), 'text', 'Country']]

path = "Resources/Query.png"
imgQ = cv2.imread(path)
h,w,c = imgQ.shape
#imgQ = cv2.resize(imgQ, (w//3, h//3))

orb = cv2.ORB_create(1000) #To create our ORB (Oriented FAST and Rotated BRIEF) detector we can simply write
kp1, des1 = orb.detectAndCompute(imgQ, None)
#imgKp1 = cv2.drawKeypoints(img, kp1, None)


path = 'UserForms'
myPicList = os.listdir(path)
print(myPicList)
for j,y in enumerate(myPicList):
    img = cv2.imread(path +"/"+y)
    #img = cv2.resize(img, (w // 3, h // 3))
    #cv2.imshow(y, img)
    kp2, des2 = orb.detectAndCompute(img, None)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches = bf.match(des2,des1)
    matches.sort(key = lambda x: x.distance)
Example #12
def orb_desc(images_path, good_image_path, pref, f, name_dataset):
    images = []
    for file in os.listdir(images_path):
        if file.endswith(".jpg"):
            images.append(cv2.imread(images_path + file))

    gray_images = []
    for image in images:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray_images.append(gray)

    good_image = cv2.imread(good_image_path)
    good_gray_image = cv2.cvtColor(good_image, cv2.COLOR_BGR2GRAY)

    detector = cv2.ORB_create()
    results = []
    for image in gray_images:
        (kps, desc) = detector.detectAndCompute(image, None)
        #br = cv2.BRISK_create();
        #(kps,desc) = br.compute(image, kps)
        results.append((kps, desc, image))

    (kps_good, descs_good) = detector.detectAndCompute(good_gray_image, None)
    # br_g = cv2.BRISK_create();
    #(kps_good,descs_good) = br_g.compute(image, kps_good)

    i = 0

    for (kps, desc, image) in results:
        f.write(pref + " Image " + name_dataset + str(i) + "\n")
        f.write("keypoints: {}, descriptors: {}".format(len(kps), desc.shape) +
                "\n")
        print("keypoints: {}, descriptors: {}".format(len(kps), desc.shape))
        print("i: ", i)
        start_time = time.time()

        # create BFMatcher object
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

        # Match descriptors.
        matches = bf.match(desc, descs_good)
        print("matches")

        # Sort them in the order of their distance.
        matches = sorted(matches, key=lambda x: x.distance)
        matches2 = np.asarray(matches)
        good = np.array([])
        #for m,n in matches:
        #    if m.distance < 0.9:
        #       good.append([m])
        img3 = cv2.drawMatches(image,
                               kps,
                               good_gray_image,
                               kps_good,
                               matches,
                               good,
                               flags=2)
        cv2.imwrite(
            "results_orb/" + pref + "res" + name_dataset + str(i) + ".jpg",
            img3)

        # img3 = cv2.drawMatches(image, kps, good_gray_image, kps_good, good, None, flags=2)
        #cv2.imwrite("../results_fast/"+pref+"res"+name_dataset+str(i)+".jpg", img3)
        f.write("Time: {}\n".format(time.time() - start_time))
        i = i + 1
Example #13
def calc_features(data):
    orb = cv2.ORB_create()
    # sift = cv2.xfeatures2d.SIFT_create()
    # surf = cv2.xfeatures2d.SURF_create()
    # brief = cv2.xfeatures2d.BriefDescriptorExtractor_create()

    progresser = tqdm(iterable=range(0, len(data)),
                      desc='calc video features',
                      total=len(data),
                      unit='files')

    feat, targets = [], []
    for i in progresser:
        clip = data[i]
        rm_list = []

        for i, sample in enumerate(clip.data_samples):

            dist_nose = []
            dist_mouth = []
            dist_mouth_down = []
            dist_mouth_right = []
            dist_mouth_left = []

            dist_eye_left = []
            dist_eye_right = []

            dist_brown_left = []
            dist_brown_right = []

            kp = []

            lm_nose = sample.landmarks[30]  # point on the nose

            lm_mouth = sample.landmarks[63]
            lm_mouth_down = sample.landmarks[58]
            lm_mouth_right = sample.landmarks[65]
            lm_mouth_left = sample.landmarks[61]

            lm_left_brown = sample.landmarks[20]
            lm_right_brown = sample.landmarks[25]

            lm_eye_left = sample.landmarks[42]
            lm_eye_right = sample.landmarks[44]

            img = cv2.copyMakeBorder(sample.image, BORDER, BORDER, BORDER, BORDER, cv2.BORDER_REPLICATE)

            for j in range(len(sample.landmarks)):
                lm = sample.landmarks[j]
                dist_nose.append(np.sqrt((lm_nose[0] - lm[0]) ** 2 + (lm_nose[1] - lm[1]) ** 2))

                dist_mouth.append(np.sqrt((lm_mouth[0] - lm[0]) ** 2 + (lm_mouth[1] - lm[1]) ** 2))
                dist_mouth_down.append(np.sqrt((lm_mouth_down[0] - lm[0]) ** 2 + (lm_mouth_down[1] - lm[1]) ** 2))
                dist_mouth_left.append(np.sqrt((lm_mouth_left[0] - lm[0]) ** 2 + (lm_mouth_left[1] - lm[1]) ** 2))
                dist_mouth_right.append(np.sqrt((lm_mouth_right[0] - lm[0]) ** 2 + (lm_mouth_right[1] - lm[1]) ** 2))

                dist_eye_left.append(np.sqrt((lm_eye_left[0] - lm[0]) ** 2 + (lm_eye_left[1] - lm[1]) ** 2))
                dist_eye_right.append(np.sqrt((lm_eye_right[0] - lm[0]) ** 2 + (lm_eye_right[1] - lm[1]) ** 2))


                dist_brown_left.append(np.sqrt((lm_left_brown[0] - lm[0]) ** 2 + (lm_left_brown[1] - lm[1]) ** 2))
                dist_brown_right.append(np.sqrt((lm_right_brown[0] - lm[0]) ** 2 + (lm_right_brown[1] - lm[1]) ** 2))
                p = cv2.KeyPoint(lm[0] + BORDER, lm[1] + BORDER, _size=120)
                kp.append(p)


            _, desk = orb.compute(img, kp)

            #_, desk_brief = brief.compute(img, kp)
            # _, desk_sift = sift.compute(img, kp)
            # _, desk_surf = surf.compute(img, kp)
            # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            #lbp = local_binary_pattern(gray, NPOINTS_LBP, RADIUS_LPB, 'uniform')
            #(hist, _) = np.histogram(lbp.ravel(), bins=np.arange(0, NPOINTS_LBP + 3), range=(0, NPOINTS_LBP + 2))
            #hist = hist.astype("float") # normalize the histogram
            #hist /= (hist.sum() + 1e-7)

            if check_dim_desc([
                desk ]):
                feat.append(
                    desk.flatten().tolist() +
                    # desk_brief.flatten().tolist() +
                    # desk_sift.flatten().tolist() +
                    # desk_surf.flatten().tolist() +
                    # hist.tolist() +
                    dist_nose + 
                    dist_mouth +
                    dist_mouth_down +
                    dist_mouth_right + 
                    dist_mouth_left +
                    dist_brown_right +
                    dist_brown_left +
                    dist_eye_left + 
                    dist_eye_right
                    )
                targets.append(sample.labels)
            else:
                rm_list.append(sample)

        # feat.append(features_per_clip)
        # targets.append(clip.labels)
        # feat_len.append(len(features_per_clip))

        if rm_list:
            for sample in rm_list:
                clip.data_samples.remove(sample)

    # count_vector_frames_per_clip = Counter(feat_len)
    # optimal_size = 30
    # max_frame_vector_per_clip = 39 # max([k for k in count_vector_frames_per_clip])
    # min_frame_vector_per_clip = min([k for k in count_vector_frames_per_clip])
    # print('frame_vector_len:', len(feat[0][0]))
    # print('max frames vectors per clip:', max_frame_vector_per_clip)
    # print('min frames vectors per clip:', min_frame_vector_per_clip)
    
    #feat_transform = []
    #for f_v in feat:
    #  if len(f_v) == optimal_size:
    #      feat_transform.append([el for v in f_v for el in v])
    #  else:
    #      # f_v = f_v + [np.zeros(len(feat[0][0])) for i in range(0, max_frame_vector_per_clip - len(f_v))]
    #      feat_transform.append([el for v in f_v for el in v])
    #print('Check feat_v clips len:', Counter([len(f_v) for f_v in feat_transform]))
    
    print('objects count:', len(feat))
    print('unique labels count:', len(set(targets)))
    return np.asarray(feat, dtype=np.float32), np.asarray(targets, dtype=np.float32)
Example #14
    def detect(self, imggray, outimg=None):
        h, w = imggray.shape
        hlist = []

        # Create a keypoint detector if needed:
        if not hasattr(self, 'detector'):
            self.detector = cv2.ORB_create()

        # Load training image and detect keypoints on it if needed:
        if not hasattr(self, 'refkp'):
            refimg = cv2.imread(self.fname, 0)
            self.refkp, self.refdes = self.detector.detectAndCompute(
                refimg, None)

            # Also store corners of reference image and of window for homography mapping:
            refh, refw = refimg.shape
            self.refcorners = np.float32([[0.0, 0.0], [0.0,
                                                       refh], [refw, refh],
                                          [refw, 0.0]]).reshape(-1, 1, 2)
            self.wincorners = np.float32(
                [[
                    self.winleft * refw / self.owm,
                    self.wintop * refh / self.ohm
                ],
                 [
                     self.winleft * refw / self.owm,
                     (self.wintop + self.winh) * refh / self.ohm
                 ],
                 [(self.winleft + self.winw) * refw / self.owm,
                  (self.wintop + self.winh) * refh / self.ohm],
                 [(self.winleft + self.winw) * refw / self.owm,
                  self.wintop * refh / self.ohm]]).reshape(-1, 1, 2)
            jevois.LINFO(
                "Extracted {} keypoints and descriptors from {}".format(
                    len(self.refkp), self.fname))

        # Compute keypoints and descriptors:
        kp, des = self.detector.detectAndCompute(imggray, None)
        str = "{} keypoints".format(len(kp))

        # Create a matcher if needed:
        if not hasattr(self, 'matcher'):
            self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

        # Compute matches between reference image and camera image, then sort them by distance:
        matches = self.matcher.match(des, self.refdes)
        matches = sorted(matches, key=lambda x: x.distance)
        str += ", {} matches".format(len(matches))

        # Keep only good matches:
        lastidx = 0
        for m in matches:
            if m.distance < self.distth: lastidx += 1
            else: break
        matches = matches[0:lastidx]
        str += ", {} good".format(len(matches))

        # If we have enough matches, compute homography:
        corners = []
        wincorners = []
        if len(matches) >= 10:
            obj = []
            scene = []

            # Localize the object (see JeVois C++ class ObjectMatcher for details):
            for m in matches:
                obj.append(self.refkp[m.trainIdx].pt)
                scene.append(kp[m.queryIdx].pt)

            # compute the homography
            hmg, mask = cv2.findHomography(np.array(obj), np.array(scene),
                                           cv2.RANSAC, 5.0)

            # Check homography conditioning using SVD:
            u, s, v = np.linalg.svd(hmg, full_matrices=False)

            # We need the smallest eigenvalue to not be too small, and the ratio of largest to smallest eigenvalue to be
            # quite large for our homography to be declared good here. Note that linalg.svd returns the eigenvalues in
            # descending order already:
            if s[-1] > 0.001 and s[0] / s[-1] > 100:
                # Project the reference image corners to the camera image:
                corners = cv2.perspectiveTransform(self.refcorners, hmg)
                wincorners = cv2.perspectiveTransform(self.wincorners, hmg)

        # Display any results requested by the users:
        if outimg is not None and outimg.valid():
            if len(corners) == 4:
                jevois.drawLine(outimg, int(corners[0][0, 0] + 0.5),
                                int(corners[0][0, 1] + 0.5),
                                int(corners[1][0, 0] + 0.5),
                                int(corners[1][0, 1] + 0.5), 2,
                                jevois.YUYV.LightPink)
                jevois.drawLine(outimg, int(corners[1][0, 0] + 0.5),
                                int(corners[1][0, 1] + 0.5),
                                int(corners[2][0, 0] + 0.5),
                                int(corners[2][0, 1] + 0.5), 2,
                                jevois.YUYV.LightPink)
                jevois.drawLine(outimg, int(corners[2][0, 0] + 0.5),
                                int(corners[2][0, 1] + 0.5),
                                int(corners[3][0, 0] + 0.5),
                                int(corners[3][0, 1] + 0.5), 2,
                                jevois.YUYV.LightPink)
                jevois.drawLine(outimg, int(corners[3][0, 0] + 0.5),
                                int(corners[3][0, 1] + 0.5),
                                int(corners[0][0, 0] + 0.5),
                                int(corners[0][0, 1] + 0.5), 2,
                                jevois.YUYV.LightPink)
            jevois.writeText(outimg, str, 3, h + 4, jevois.YUYV.White,
                             jevois.Font.Font6x10)

        # Return window corners if we did indeed detect the object:
        hlist = []
        if len(wincorners) == 4: hlist.append(wincorners)

        return hlist
Example #15
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import pygame
import pygame.image
from pygame.locals import *
import numpy as np
import pickle
import cv2

import camera

width, height = 1280, 720
sift = cv2.ORB_create()
model = cv2.imread('model.jpg', 0)
model = cv2.resize(model, (int(width / 1.2), int(height / 1.2)))
kp1, des1 = sift.detectAndCompute(model, None)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

# K = np.array([[640, 0, 320], [0, 480, 240], [0, 0, 1]])
K = np.array([[1186, 0, 656], [0, 1168, 380], [0, 0, 1]])


def set_projection_from_camera(K):
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()

    fx = float(K[0, 0])
    fy = float(K[1, 1])
    fovy = 2 * np.arctan(0.5 * height / fy) * 180 / np.pi
    aspect = (width * fy) / (height * fx)
Example #16
def kp_orb_detection(img, num_kp):
    # find features using orb
    orb = cv2.ORB_create(nfeatures=num_kp).detect(img)
    return orb
Example #17
smoothenedMatrix = np.float32([[1, 0, 0], [0, 1, 0], [0, 0, 1]])

# Global variable to hold all celestial bodies
planets = stars = planet_list = []

# images to be loaded
imageToBeProjected = 'solar_system2.png'
shuttleToBeDrawn = 'shuttleIcon.png'

# marker stuff
marker_file_name = ["markers/marker_one_small.png", "markers/marker_two_small.png", "markers/marker_three_small.png", "markers/marker_four_small.png"]
marker_points = [[0, 0], [0, projectedImageHeight - 100], [projectedImageWidth - 100, 0], [projectedImageWidth - 100, projectedImageHeight - 100]]

# initialize the feature detector
# we use orb, make it ready
orb = cv2.ORB_create(nfeatures=500)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = []
matchesMask = []

"""Specific class which is used to read template images with filenames associated with it"""
class PlanetTemplateImage:
    def __init__(self, img_name):
        self.img = cv2.imread(img_name, 0)
        self.__name = img_name

    def __str__(self):
        return self.__name


"""
Example #18
def kp_orb_descriptor(img, kp, num_kp):
    # compute the descriptors with orb
    kp, desc = cv2.ORB_create(nfeatures=num_kp).compute(img, kp)
    return desc
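Examples #16 and #18 split detection and description into two helpers; a hedged usage sketch combining them (the image path is a placeholder):

# Hypothetical usage of kp_orb_detection and kp_orb_descriptor.
img = cv2.imread('frame.png', cv2.IMREAD_GRAYSCALE)
kp = kp_orb_detection(img, num_kp=500)          # detect up to 500 ORB keypoints
desc = kp_orb_descriptor(img, kp, num_kp=500)   # compute their ORB descriptors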
Example #19
# percentage of the best matches to keep for comparison
percent = 25
pixelThreshold = 160

roi = [[(123, 65), (259, 76), 'text', 'Name'],
       [(71, 214), (90, 224), 'box', 'Vehicletype']]

pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'

# load query image as compare default
imQ = cv2.imread('sampleData/Query.jpg')
h, w, c = imQ.shape

# image detection
orb = cv2.ORB_create(1100)
kp1, des1 = orb.detectAndCompute(imQ, None)

path = 'compareData'
myPictureList = os.listdir(path)
#print(myPictureList)
for j, y in enumerate(myPictureList):
    image = cv2.imread(path + "/" + y)
    kp2, des2 = orb.detectAndCompute(image, None)
    bruteForce = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches = bruteForce.match(des2, des1)
    # a lower distance means a better match
    matches.sort(key=lambda x: x.distance)
    good = matches[:int(len(matches) * (percent / 100))]
    # imageMatch = cv2.drawMatches(image, kp2, imQ, kp1, good[:50], None, flags=2)
    # cv2.imshow(y, imageMatch)
Example #20
def orb_sift_ransac(image_orig, image_warp, kp='sift'):
    """compute transformation matrix H from img_warp to img_orig
    """
    if kp == 'orb':
        print('ORB')
        detector = cv2.ORB_create() #cv2.xfeatures2d.ORB_create()
    elif kp == 'sift':
        print('SIFT')
        detector = cv2.xfeatures2d.SIFT_create()

    kp_warp = detector.detect(image_warp ,None)
    kp_warp, des_warp = detector.compute(image_warp, kp_warp)

    kp_orig = detector.detect(image_orig ,None)
    kp_orig, des_orig = detector.compute(image_orig, kp_orig)

    MIN_MATCH_COUNT = 10
    if des_warp is None or des_orig is None:
        return None, None  #np.identity(3)
    if len(des_warp) < MIN_MATCH_COUNT or len(des_orig) < MIN_MATCH_COUNT:
        return None, None

    M, matchesMask, img_match = None, None, None

    if 1:
        # create BFMatcher object
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

        # Match descriptors.
        if kp == 'sift':
            matches = bf.match(des_warp.astype(np.uint8), des_orig.astype(np.uint8))
        else:
            matches = bf.match(des_warp, des_orig)

        # Sort them in the order of their distance.
        matches = sorted(matches, key = lambda x:x.distance)
        good = matches[:25] # we take the first 25 matches
        if len(good) >= MIN_MATCH_COUNT:
            src_pts = np.float32([kp_warp[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
            dst_pts = np.float32([kp_orig[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
            matchesMask = mask.ravel().tolist()
    else:
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)

        matches = flann.knnMatch(np.float32(des_warp), np.float32(des_orig), k=2)

        # store all the good matches as per Lowe's ratio test.
        good = []
        for m, n in matches:
            if m.distance < 0.7*n.distance:
                good.append(m)
            #good.append(m)
        if len(good) >= MIN_MATCH_COUNT:
            src_pts = np.float32([kp_warp[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
            dst_pts = np.float32([kp_orig[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
            matchesMask = mask.ravel().tolist()

    ###### tmp
    if matchesMask:
        draw_params = dict(matchColor = (0,255,0), # draw matches in green color
                    singlePointColor = None,
                    #matchesMask = matchesMask, # draw only inliers
                    flags = 2)
        img_match = cv2.drawMatches(image_warp, kp_warp, image_orig, kp_orig, good, None,**draw_params)

    return M, img_match
Example #21
camera.shutter_speed = 20000
#camera.contrast = 100

#camera.image_effect = ''

rawCapture = PiRGBArray(camera, size=(dimx, dimy))
c = 0

tmp = cv2.imread("target_qrcode/barcode2.png")

target_barcode = cv2.resize(tmp, (96, 96))

orb = cv2.ORB_create(nfeatures=100,
                     nlevels=6,
                     scaleFactor=1.2,
                     patchSize=31,
                     edgeThreshold=15,
                     fastThreshold=1)

kp_target = orb.detect(target_barcode, None)

print([kp.size for kp in kp_target])

#kp_target = cv2.KeyPoint(64,64, 63.0)
kp_target_, des_target_ = orb.compute(target_barcode, kp_target)


def remove_similar_feature(kp, des, threshold=40):
    flag = False

    print("number of kp:", len(kp))
Example #22
def transformMap(groundTruthFile,
                 saveFile,
                 testPath,
                 saveFolder,
                 poorMapsfolder,
                 hit,
                 miss,
                 unknown,
                 feature='sift'):
    if feature == 'orb':
        orb = cv2.ORB_create()
    if feature == 'sift':
        sift = cv2.xfeatures2d.SIFT_create()

    print "Load Images: "
    print "Loading: %s" % (groundTruthFile)
    gtImage = cv2.imread(groundTruthFile, 0)
    print "Loaded: %s" % (groundTruthFile)
    cv2.imwrite(saveFile, gtImage)
    height, width = gtImage.shape
    if feature == 'orb':
        kp1, des1 = orb.detectAndCompute(gtImage, None)
    if feature == 'sift':
        kp1, des1 = sift.detectAndCompute(gtImage, None)
    des1 = np.asarray(des1, np.float32)

    for fileN in os.listdir(testPath):
        if not fileN.endswith(".pgm"):
            continue
        imFile = "%s%s" % (testPath, fileN)
        imFileW = "%s%s" % (saveFolder, fileN)
        imFileP = "%s%s" % (poorMapsfolder, fileN)
        print "Loading: %s" % (imFile)
        im = cv2.imread(imFile, 0)
        print "Loaded!"
        if feature == 'orb':
            kp2, des2 = orb.detectAndCompute(im, None)
        if feature == 'sift':
            kp2, des2 = sift.detectAndCompute(im, None)
        des2 = np.asarray(des2, np.float32)
        print "Featur points detected."
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des2, des1, k=2)
        print "Points matched."

        good = deque()
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)
        print len(good)
        if len(good) < 1:
            print "Not enough points!"
            cv2.imwrite(imFileP, im)
            continue
        dst_pts = np.float32([kp2[m.queryIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        src_pts = np.float32([kp1[m.trainIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        M = cv2.estimateRigidTransform(src_pts, dst_pts, False)
        if M is not None:
            M = np.asarray(M, np.float32)
            M = cv2.invertAffineTransform(M)
            imW = cv2.warpAffine(im, M, (width, height), cv2.INTER_NEAREST)
            imW = imW - hit
            imW = cv2.threshold(imW, unknown - 2 - hit, 255, 3)
            # (255-(imW+hit))-(255-miss)
            imW = miss - hit - imW[1]
            imW = cv2.threshold(imW, miss - unknown, 255, 3)
            # 255-(imw[1]+(255-miss))
            imW = miss - imW[1]
            cv2.imwrite(imFileW, imW)
        else:
            print "No homography found!"
            cv2.imwrite(imFileP, im)
Example #23
import numpy as np
import cv2
from matplotlib import pyplot as plt

im1 = cv2.imread('piano4.jpg')
im2 = cv2.imread('piano5.jpg')
gray1 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)

orb = cv2.ORB_create(nfeatures=100, nlevels=1, edgeThreshold=30)

corners1 = cv2.cornerHarris(gray1, 2, 3, 0.04)
corners2 = cv2.cornerHarris(gray2, 2, 3, 0.04)
kpCorners1 = np.argwhere(corners1 > 0.01 * corners1.max())
kpCorners2 = np.argwhere(corners2 > 0.01 * corners2.max())
kpCorners1 = [cv2.KeyPoint(pt[1], pt[0], 3) for pt in kpCorners1]
kpCorners2 = [cv2.KeyPoint(pt[1], pt[0], 3) for pt in kpCorners2]
kp1, des1 = orb.compute(gray1, kpCorners1)
kp2, des2 = orb.compute(gray2, kpCorners2)

# Matching
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2)
matches = sorted(matches, key=lambda x: x.distance)
im3 = cv2.drawMatches(im1, kp1, im2, kp2, matches[:10], outImg=None)

# Show
cv2.imshow('result', im3)
cv2.waitKey(0)
cv2.imwrite('result.png', im3)
Example #24
training_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Display the images
plt.subplot(121)
plt.title('Original Training Image')
plt.imshow(training_image)
plt.subplot(122)
plt.title('Gray Scale Training Image')
plt.imshow(training_gray, cmap='gray')
plt.show()

# Locating key points using ORB
plt.rcParams['figure.figsize'] = [14.0, 7.0]
# Set the parameters of the ORB algorithm by specifying the maximum number of keypoints to locate and
# the pyramid decimation ratio
orb = cv2.ORB_create(2000, 2.0)
# Find the keypoints in the gray scale training image and compute their ORB descriptor.
# The None parameter is needed to indicate that we are not using a mask.
keypoints, descriptor = orb.detectAndCompute(training_gray, None)

# Create copies of the training image to draw our keypoints on
keyp_without_size = copy.copy(training_image)
keyp_with_size = copy.copy(training_image)

# Draw the keypoints without size or orientation on one copy of the training image
cv2.drawKeypoints(training_image,
                  keypoints,
                  keyp_without_size,
                  color=(0, 255, 0))

# Draw the keypoints with size and orientation on the other copy of the training image
Example #25
def search_lock(target, algo):
    #Capturing Real Time Video
    cap = cv2.VideoCapture(0)
    #width
    cap.set(3, 432)
    #height
    cap.set(4, 432)
    if (algo == "orb"):
        #ORB
        orb = cv2.ORB_create()
        keypoints, descriptors = orb.detectAndCompute(target, None)
        output = cv2.drawKeypoints(target, keypoints, None)

        #Flann parameters for ORB
        indexparameters = dict(
            algorithm=6,
            table_number=12,  #6, # 12
            key_size=20,  #12,     # 20
            multi_probe_level=2)  #1) #2

        searchparameters = dict(checks=30)
        flann = cv2.FlannBasedMatcher(indexparameters, searchparameters)

    #for  SURF
    if (algo == "surf"):
        #SURF
        surf = cv2.xfeatures2d.SURF_create()  # FEATURES
        keypoints, descriptors = surf.detectAndCompute(target,
                                                       None)  # KEYPOINTS
        output = cv2.drawKeypoints(
            target, keypoints, None
        )  #outImage	=	cv.drawKeypoints(	image, keypoints, outImage[, color[, flags]]	)

        #Flann parameters for SURF
        indexparameters1 = dict(algorithm=0, trees=5)  #
        searchparameters1 = dict()
        flann = cv2.FlannBasedMatcher(indexparameters1, searchparameters1)

    # Parameters for lucas kanade optical flow
    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))

    # Create some random colors
    color = np.random.randint(0, 255, (100, 3))

    if cap.isOpened():
        ret, frame = cap.read()
        #cap.read() returns a bool (True/False). If frame is read correctly, it will be True. So you can check end of the video by checking this return value.
        #ret will store that bool value

    else:
        ret = False

    while ret:
        cv2.imshow("ORB-TARGET", output)

        ret, frame = cap.read()
        #frame2 = frame
        #cv2.imshow("Live Video Feed",frame)
        #gaussianBlur = cv2.GaussianBlur(frame,(5,5),0)
        medianBlur = cv2.medianBlur(frame, 5)
        grayFrame = cv2.cvtColor(medianBlur, cv2.COLOR_BGR2GRAY)  #GRAYSCALE

        if (algo == "orb"):
            print("ORB ALGORITHM ")
            #ORB ALGORITHM APPLIED TO REAL TIME CAPTURING
            keypoints_grayFrame, descriptors_grayFrame = orb.detectAndCompute(
                grayFrame, None)
            show_keypoints_grayFrame = cv2.drawKeypoints(
                grayFrame, keypoints_grayFrame, None)
            #cv2.imshow("Real Time Cap orb", show_keypoints_grayFrame_orb)

        if (algo == "surf"):
            print("SURF ALGORITHM ")
            #SURF ALGORITHM APPLIED TO REAL TIME CAPTURING
            keypoints_grayFrame, descriptors_grayFrame = surf.detectAndCompute(
                grayFrame, None)
            show_keypoints_grayFrame = cv2.drawKeypoints(
                grayFrame, keypoints_grayFrame, None)
            cv2.imshow("Real Time Cap surf", show_keypoints_grayFrame)

        matches_flann = flann.knnMatch(descriptors, descriptors_grayFrame, k=2)

        goodMatches_flann = []
        for m in matches_flann:
            if len(m) > 0 and m[0].distance < 0.2 * m[-1].distance:
                goodMatches_flann.append(m[0])

        result_flann = cv2.drawMatches(target, keypoints, grayFrame,
                                       keypoints_grayFrame, goodMatches_flann,
                                       grayFrame)

        #cv2.imshow("Result_flann_orb", result_flann_orb)

        if len(goodMatches_flann) > 7:
            cv2.destroyWindow("Result")
            cv2.putText(result_flann, 'TARGET-DETECTED', (650, 100),
                        cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 0, 255), 2,
                        cv2.LINE_AA)
            cv2.imshow("Target-detected", result_flann)

            queryPoints = np.float32([
                keypoints[i.queryIdx].pt for i in goodMatches_flann
            ]).reshape(
                -1, 1,
                2)  # extracting location of good matches from targeted image
            trainPoints = np.float32([
                keypoints_grayFrame[j.trainIdx].pt for j in goodMatches_flann
            ]).reshape(
                -1, 1,
                2)  # extracting location of good matches from real time vision
            #print(queryPoints)
            #representativematrix, maskk = cv2.findHomography(queryPoints, trainPoints, cv2.RANSAC, 5.0)  # this matrix represents location of target in real time vision
            #matchesMask = maskk.ravel().tolist()
            #height, width = target.shape  # height and width of original targeted image
            #points = np.float32([[0, 0],[0, height],[width, height],[width,0]]).reshape(-1, 1, 2)
            #adaptiveTemplate = cv2.perspectiveTransform(points, representativematrix)  # points will adapt matrix
            #homography = cv2.polylines(frame, [np.int32(adaptiveTemplate)], True, (255,0,0), 3)

            #cv2.imshow("Homograpyh", homography)

            targetPoints = trainPoints
            #cv2.goodFeaturesToTrack(image, maxCorners, qualityLevel, minDistance[, corners[, mask[, blockSize[, useHarrisDetector[, k]]]]]) → corners
            #STMpoints = cv2.goodFeaturesToTrack(grayFrame, mask=None, **feature_params)

            # Create a mask image for drawing purposes
            mask = np.zeros_like(frame)

            while (len(goodMatches_flann) > 7):
                ret, frame2 = cap.read()
                newGrayFrame = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)

                # calculate optical flow
                flowMatches, st, err = cv2.calcOpticalFlowPyrLK(
                    grayFrame, newGrayFrame, targetPoints, None, **lk_params)

                # Select good points
                try:
                    good_new = flowMatches[st == 1]
                    good_old = targetPoints[st == 1]
                except:
                    break

                # draw the tracks
                for i, (new, old) in enumerate(zip(good_new, good_old)):
                    a, b = new.ravel()
                    print("new points", (a, b))
                    c, d = old.ravel()
                    print("old points", (c, d))
                    maskk = cv2.line(mask, (a, b), (c, d), color[i].tolist(),
                                     2)
                    frame3 = cv2.circle(frame2, (a, b), 5, color[i].tolist(),
                                        -1)
                    """
                    if (c-a)>0:
                        print("*******DOWN********")
                    elif (c-a)<0:
                        print("*******UP********")
                    elif (d-b) >0:
                        print("*******RIGHT*******")
                    elif (d-b) <0:
                        print("*******LEFT*******")
                    """

                    if not (
                        (150 < a and a < 195) and (115 < b and b < 155)
                    ):  #160 middle     145middle #(130<a and a<220) and (114<b and b<190)
                        if (a < 130):
                            print("RIGHT!")
                            send_body_ned_velocity(0.5, 0, 0, duration=1)
                        if (a > 220):
                            print("LEFT!")
                            send_body_ned_velocity(-0.5, 0, 0, duration=1)
                        if (b < 114):
                            print("BACK!")
                            send_body_ned_velocity(0, -0.5, 0, duration=1)
                        if (b > 190):
                            print("FRONT!")
                            send_body_ned_velocity(0, 0.5, 0, duration=1)
                    else:
                        cv2.putText(frame3, 'Initializing Landing...',
                                    (80, 80), cv2.FONT_HERSHEY_SIMPLEX, .5,
                                    (30, 40, 50), 2, cv2.LINE_AA)

                        print("Initiate Landing...")
                        return True
                """    
                print(maskk.shape)
                print(frame3.shape)    
                print(maskk.dtype)
                print(frame3.dtype)
                """
                img = cv2.add(frame3, maskk)
                img = cv2.rectangle(img, (100, 95), (250, 175), (255, 0, 0), 2)
                img = cv2.rectangle(img, (120, 115), (230, 155), (0, 255, 0),
                                    2)  #(0,10), (350,275)
                cv2.imshow('frame', img)
                # Now update the previous frame and previous points
                grayFrame = newGrayFrame.copy()
                targetPoints = good_new.reshape(-1, 1, 2)
                if cv2.waitKey(1) == 27:
                    break
        else:
            cv2.destroyWindow("Target-detected")
            cv2.destroyWindow("frame")
            #cv2.destroyWindow("Homograpyh")
            cv2.imshow("Result", result_flann)

        if cv2.waitKey(1) == 27:
            break

    # When everything done, release the capture
    cv2.destroyAllWindows()
    cap.release()
Example #26
import cv2
import imutils
import numpy as np
 
# load the image and convert it to grayscale
image = cv2.imread("./images/Station.png")
orig = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
 
# detect ORB keypoints in the image for OpenCV 2.4
if imutils.is_cv2():
	detector = cv2.FeatureDetector_create("ORB")
	kps = detector.detect(gray)
 
# detect ORB keypoints in the image for OpenCV 3+
else:
	detector = cv2.ORB_create()
	kps = detector.detect(gray, None)
 
print("# of keypoints: {}".format(len(kps)))
 
# loop over the keypoints and draw them
for kp in kps:
	r = int(0.5 * kp.size)
	(x, y) = np.int0(kp.pt)
	cv2.circle(image, (x, y), r, (0, 255, 255), 2)
 
# show the image
cv2.imshow("Images", np.hstack([orig, image]))
cv2.waitKey(0)

Example #27
def get_descriptor_from_img(img):
    orb = cv2.ORB_create()
    kp, des = orb.detectAndCompute(img, None)
    return kp, des
Example #28
    def find_1(cls, image, similarity=DEFAULT_SIMILARITY, show=False):
        template = cv2.imread("assets/{}.png".format(image), 0)

        orb = cv2.ORB_create()
        kp1, des1 = orb.detectAndCompute(template, None)
        kp2, des2 = orb.detectAndCompute(screen, None)

        # plt.imshow(template), plt.show()
        # plt.imshow(screen), plt.show()

        # Define the FLANN matcher
        index_params = dict(algorithm=1, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)

        # Match descriptors with the KNN algorithm
        des1 = des1.astype('float32')
        des2 = des2.astype('float32')
        matches = flann.knnMatch(des1, des2, k=2)

        # Filter out bad matches
        good = []
        for m, n in matches:
            # if m.distance < 0.1 * n.distance:
            good.append(m)

        # if len(good) > 10:
        #     # Reshape the arrays without changing their contents; the data are the coordinates of each keypoint
        #     src_pts = numpy.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        #     dst_pts = numpy.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        #     # findHomography computes the transformation matrix
        #     # the cv2.RANSAC flag uses the RANSAC algorithm to find the best homography matrix H, i.e. the return value M
        #     # return values: M is the transformation matrix, mask is the inlier mask
        #     M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        #     # ravel flattens the mask, which is then converted to a list
        #     matchesMask = mask.ravel().tolist()
        #     # get the image size of img1
        #     h, w = template.shape
        #     # pts are the four corners of img1
        #     pts = numpy.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        #     # compute the positions of the four corners after the transform
        #     dst = cv2.perspectiveTransform(pts, M)
        #
        #     # b = np.int32(dst).reshape(4, 2)
        #     # img_temp = img2.copy()
        #     # cv2.fillConvexPoly(img_temp, b, 0)
        #
        #     # draw the transformed border on img2 using the four corner positions
        #     img2 = cv2.polylines(screen, [numpy.int32(dst)], True, (0, 255, 0), 3, cv2.LINE_AA)
        # else:
        #     print("Not enough matches are found - %d/%d") % (len(good), MIN_MATCH_COUNT)
        matchesMask = None
        #

        draw_params = dict(
            matchColor=(0, 255, 0),  # draw matches in green color
            singlePointColor=None,
            matchesMask=matchesMask,  # draw only inliers
            flags=2,
        )

        img3 = cv2.drawMatches(template, kp1, screen, kp2, good, None, **draw_params)
        plt.imshow(img3, "gray"), plt.show()

        pass
                cv2.imshow('Dense LK', vis)

        # experiment using a feature-based detection
        elif method == "feature":
            # detect key feature points
            featureDetectorType = "ORB"
            if featureDetectorType is "SIFT":
                detector = cv2.xfeatures2d.SIFT_create()
                kp1 = detector.detect(im1)
                kp2 = detector.detect(im2)
            elif featureDetectorType is "SURF":
                detector = cv2.xfeatures2d.SURF_create()
                kp1 = detector.detect(im1)
                kp2 = detector.detect(im2)
            elif featureDetectorType is "ORB":
                detector = cv2.ORB_create(nfeatures=1500)
                kp1 = detector.detect(im1)
                kp2 = detector.detect(im2)
            else:
                assert (False, "Invalid Feature Detector")

            featureDescriptorType = "ORB"
            if featureDescriptorType is "SIFT":
                descriptor = cv2.xfeatures2d.SIFT_create()
                kp1, des1 = detector.compute(im1, kp1)
                kp2, des2 = detector.compute(im2, kp2)
            elif featureDescriptorType is "SURF":
                descriptor = cv2.xfeatures2d.SURF_create()
                kp1, des1 = detector.compute(im1, kp1)
                kp2, des2 = detector.compute(im2, kp2)
            elif featureDescriptorType is "ORB":
def orbgen(img, n=1000):
    orb = cv2.ORB_create(nfeatures=n)
    #detect features
    (keypoints, descriptors) = orb.detectAndCompute(img, None)
    return keypoints, descriptors
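A hedged usage sketch for orbgen, matching two images with a brute-force Hamming matcher (the file names are placeholders):

# Hypothetical usage of orbgen; image paths are placeholders.
img1 = cv2.imread('a.jpg', cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread('b.jpg', cv2.IMREAD_GRAYSCALE)
kp1, des1 = orbgen(img1)
kp2, des2 = orbgen(img2)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)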