Example #1
 def __init__(self, ratio_matching = 0.5, ratio_pose = 0.5, detecteur = 'fast', descripteur = 'orb'):
     self.ratio_matching = ratio_matching
     self.ratio_pose = ratio_pose
     self.detecteur = detecteur
     self.descripteur = descripteur
     self.detecteurs = {  'akaze': cv2.AKAZE_create(),
                          'agast': cv2.AgastFeatureDetector_create(),
                          'brisk': cv2.BRISK_create(),
                          'fast' : cv2.FastFeatureDetector_create(),
                          'gftt' : cv2.GFTTDetector_create(),
                          'kaze' : cv2.KAZE_create(),
                          'mser' : cv2.MSER_create(),
                          'orb'  : cv2.ORB_create(),
                          'blob' : cv2.SimpleBlobDetector_create() }
     self.descripteurs = {'brisk': cv2.BRISK_create(),
                          'orb'  : cv2.ORB_create(),}
     self.detector =   self.detecteurs[self.detecteur]
     self.descriptor = self.descripteurs[self.descripteur]
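     # brute-force matcher using the descriptor's native norm (NORM_HAMMING for the binary ORB/BRISK descriptors)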
     self.matcheur = cv2.BFMatcher(normType = self.descriptor.defaultNorm(), crossCheck = True )
     self.KF = []
     self.current_kf = None
     self.traj = []
     self.current_time = 0
     self.prev_pts = None
     print('building the SLAM')
Example #2
def BRISK_descriptor(img_0, img_1, keypoints_0, keypoints_1, transformation,
                     descriptor):
    descriptor_brisk = cv2.BRISK_create()
    _, descriptor_0 = descriptor_brisk.compute(img_0, keypoints_0)

    _, descriptor_1 = descriptor_brisk.compute(img_1, keypoints_1)

    # BRISK descriptors are binary, so match them with Hamming distance
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches = bf.match(descriptor_0, descriptor_1)

    matches = sorted(matches, key=lambda x: x.distance)

    img_match = cv2.drawMatches(
        img_0,
        keypoints_0,
        img_1,
        keypoints_1,
        matches[:10],
        None,
        flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)

    plt.figure()
    plt.imshow(img_match)
    plt.title(transformation + ': ' + descriptor + ' matching')
    plt.show()
Example #3
 def _set_cv2_instance(self):
     """
     feature_typeに合わせたopencvのインスタンスを生成する
     対応:ORB, BRISK
     """
     params = self.DEFAULT_PARAMS[self.feature_type]
     for param in params.keys():
         if param in self.kwargs:
             params[param] = self.kwargs[param]
     self.cv2_params = params
     if self.feature_type == "orb":
         self.fe = cv2.ORB_create(
             edgeThreshold=0,
             patchSize=params["patchSize"],
             scaleFactor=params["scaleFactor"]
         )
     elif self.feature_type == "brisk":
         # threshold is set to 0 because keypoints are extracted on a grid
         self.fe = cv2.BRISK_create(
             thresh=0,
             octaves=params["octaves"],
             patternScale=params["patternScale"]
         )
     elif self.feature_type == "sift":
         self.fe = cv2.xfeatures2d.SIFT_create(
             contrastThreshold=params["contrastThreshold"],
             edgeThreshold=params["edgeThreshold"],
             sigma=params["sigma"]
         )
Example #4
File: sift1.py Project: xuannianc/invoice
 def describe(self, image):
     descriptor = cv2.BRISK_create()
     if self.useSIFT:
         descriptor = cv2.xfeatures2d.SIFT_create()
     (kps, descs) = descriptor.detectAndCompute(image, None)
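      # convert the cv2.KeyPoint objects into a float32 array of (x, y) coordinates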
     kps = np.float32([kp.pt for kp in kps])
     return (kps, descs)
Example #5
def main():

    parser = argparse.ArgumentParser(description="Landmark manager.")
    parser.add_argument("image", type=str, help="frontal view of the landmark")
    parser.add_argument("output", type=str, help="output xml file name")
    parser.add_argument("--debug", action="store_true", help="debug mode")
    parser.add_argument("--ppu",
                        type=float,
                        default=30.0,
                        help="pixels per (real world) unit")

    args = parser.parse_args()

    image = imread(args.image)[..., :3]
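    # keep only the first three channels (drops any alpha channel)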

    # fextractor = cv2.ORB_create(500)
    fextractor = cv2.BRISK_create()
    # fextractor = cv2.xfeatures2d.SIFT_create()
    # fextractor = cv2.xfeatures2d.SURF_create()
    # fextractor = cv2.xfeatures2d.DAISY_create() # Not working
    orientations = get_orientations(0)
    keypoints, descriptors, image_size, real_size = process_landmark(
        image,
        pixels_per_unit=args.ppu,
        feature_extractor=fextractor,
        orientations=orientations,
        debug=args.debug)
    save(args.output, keypoints, descriptors, image_size, real_size)
Example #6
    def __init__(self):
        self.inited = False
        self.descriptors = []
        self.path_to_icons = r'./res/img/priconne/unit/'
        self._path_to_icons = os.path.expanduser(self.path_to_icons)

        self.default_result = (1000, 3)

        self.classify_thresh = 15
        self.classify_distance_k = 3.0
        self.classify_calc_times = 10

        self.star_position_xs = [0.125, 0.25, 0.375, 0.5, 0.625]
        self.star_position_ys = [0.86] * 5
        self.icon_norm_size = (70, 70)

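        # parse the unit id and star count from fixed character positions in the icon file name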
        self.f_cid = lambda s: int(s[10:14])
        self.f_star = lambda s: int(s[14])

        self._extractor = cv2.BRISK_create(thresh=10, octaves=2)
        # self._extractor = cv2.xfeatures2d.SIFT_create()
        # self._extractor = cv2.ORB_create()
        # self._extractor = cv2.xfeatures2d.SURF_create()

        self._matcher = cv2.BFMatcher(cv2.NORM_L2)
Example #7
 def init_detector(self):
     """Init keypoint detector object."""
     self.detector = cv2.BRISK_create()
     # create BFMatcher object; NORM_HAMMING suits BRISK's binary
     # descriptors (NORM_L1/NORM_L2 are meant for float descriptors)
     self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
Example #8
def getFeaturesByDetector(mode,img):
    
    detector = cv2.SimpleBlobDetector_create()
    
    if mode == featuresDetector.Agast:
        detector = cv2.AgastFeatureDetector_create()
    elif mode == featuresDetector.AKAZE:
        detector = cv2.AKAZE_create()
    elif mode == featuresDetector.BRISK:
        detector = cv2.BRISK_create()
    elif mode == featuresDetector.FAST:
        detector = cv2.FastFeatureDetector_create()
    elif mode == featuresDetector.KAZE:
        detector = cv2.KAZE_create()
    elif mode == featuresDetector.MSER:
        detector = cv2.MSER_create()
    elif mode == featuresDetector.ORB:
        detector = cv2.ORB_create()
    elif mode == featuresDetector.SIFT:
        detector = cv2.xfeatures2d.SIFT_create()
    elif mode == featuresDetector.SimpleBlob:
        detector = cv2.SimpleBlobDetector_create()
    
    keypoints = detector.detect(img)
    # compute() returns (keypoints, descriptors); only algorithms that
    # implement a descriptor (AKAZE, BRISK, KAZE, ORB, SIFT) support it
    keypoints, descriptors = detector.compute(img, keypoints)
    
    return descriptors
Example #9
def FAST_n_BRIEF(img1, img2, n=1000, threshold=40):
    #test1=cv2.imread(img1,0)
    #test2=cv2.imread(img2,0)
    test1 = img1
    test2 = img2

    fast = cv2.FastFeatureDetector_create(threshold=threshold)

    kp1 = fast.detect(test1, None)
    kp2 = fast.detect(test2, None)

    #mark1=cv2.drawKeypoints(test1,kp1,None,color=(0,0,255),flags=0)
    #orient1=cv2.drawKeypoints(test1,kp1,None,color=(0,0,255),flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    #mark2=cv2.drawKeypoints(test2,kp2,None,color=(255,0,0),flags=0)
    #orient2=cv2.drawKeypoints(test2,kp2,None,color=(255,0,0),flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

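    # BRISK is used here only to describe the FAST keypoints; its thresh
    # parameter affects detection, not compute()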
    brisk = cv2.BRISK_create(thresh=75)
    kp1, des1 = brisk.compute(test1, kp1)
    kp2, des2 = brisk.compute(test2, kp2)

    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches = bf.match(des1, des2)

    matches = sorted(matches, key=lambda x: x.distance)
    #result = cv2.drawMatches(mark1,kp1,mark2,kp2,matches[:min(n, len(matches))],None,matchColor=[0,255,0], flags=2)

    #plt.imshow(result, interpolation = 'bicubic')
    #plt.axis('off')
    #plt.show()

    return kp1, kp2, matches
Example #10
def extract(image_path, type="ORB", ifShow=False):
    image = cv2.imread(image_path, 0)
    if type == "AKAZE":
        fe = cv2.AKAZE_create()
    elif type == "BRISK":
        fe = cv2.BRISK_create()
    elif type == "KAZE":
        fe = cv2.KAZE_create()
    elif type == "ORB":
        fe = cv2.ORB_create()
    elif type == "SIFT":
        fe = cv2.xfeatures2d.SIFT_create()
    elif type == "SURF":
        fe = cv2.xfeatures2d.SURF_create()
    kp, des = fe.detectAndCompute(image, None)
    if ifShow:
        im2show = cv2.drawKeypoints(image,
                                    kp,
                                    None,
                                    color=(0, 255, 0),
                                    flags=0)
        print('%s descriptors number is %d .' % (type, len(des)))
        cv2.imshow(type, im2show)
        cv2.waitKey(0)
    return des
Example #11
def getDetector(index):
    ALGO_TYPE = index
    if (ALGO_TYPE == 0):
        detector = cv.xfeatures2d.SIFT_create()
    elif (ALGO_TYPE == 1):
        detector = cv.xfeatures2d.SURF_create()
    elif (ALGO_TYPE == 2):
        detector = cv.ORB_create(nfeatures=1000,
                                 scaleFactor=1.2,
                                 WTA_K=2,
                                 scoreType=cv.ORB_FAST_SCORE)
    elif (ALGO_TYPE == 3):
        detector = cv.BRISK_create()
    elif (ALGO_TYPE == 4):
        detector = cv.KAZE_create()
    elif (ALGO_TYPE == 5):
        detector = cv.AKAZE_create()

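    # float descriptors (SIFT/SURF/KAZE) pair with a FLANN KD-tree;
    # binary ones (ORB/BRISK/AKAZE) with Hamming brute force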
    if (ALGO_TYPE == 0 or ALGO_TYPE == 1 or ALGO_TYPE == 4):
        flannParam = dict(algorithm=FLANN_INDEX_KDITREE, trees=5)  # the KD-tree index expects the key 'trees'
        matcher = cv.FlannBasedMatcher(flannParam, {})
    elif (ALGO_TYPE == 2 or ALGO_TYPE == 3 or ALGO_TYPE == 5):
        matcher = cv.BFMatcher(cv.NORM_HAMMING)

    return (detector, matcher)
Example #12
    def brisk(self):

        default_dir = ""
        savePath = tkinter.filedialog.askdirectory(
            title=u"Save path", initialdir=(os.path.expanduser(default_dir)))

        # path = 'C:/Users/sls/Desktop/1/'  # directory the images are read from; change it to your own
        path = self.filePath + '/'
        pathDir = os.listdir(path)
        for eachDir in pathDir:
            imgPath = path + eachDir
            print(imgPath)
            img = cv2.imread(imgPath)
            brisk = cv2.BRISK_create()
            (kpt, desc) = brisk.detectAndCompute(img, None)
            bk_img = img.copy()
            out_img = img.copy()
            out_img = cv2.drawKeypoints(bk_img, kpt, out_img)
            win = cv2.namedWindow('brisk', flags=0)
            cv2.imshow('brisk', out_img)
            cv2.imwrite(savePath + '/' + eachDir, out_img)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
        self.dialogText.config(state=NORMAL)
        ss = '\n   Images from BRISK corner detection have been saved\n'
        self.dialogText.insert(
            0.0,
            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + '%s' % (ss))
Example #13
def BRISK():
    # Initiate BRISK descriptor
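    # thresh=30, octaves=3, patternScale=1.0 are BRISK's default values, spelled out explicitly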
    BRISK = cv.BRISK_create(thresh = 30,
                            octaves = 3,
                            patternScale = 1.0)

    return BRISK
Example #14
def input_value(scn_img):
    h1, w1 = scn_img.shape[:2]
    scn_img = resize(h1, w1, 640, scn_img)
    hr1, wr1 = scn_img.shape[:2]

    #==================================================>KPT_SCN
    # fast:

    # 	fast = cv2.FastFeatureDetector_create(49)
    # 	kp1 = fast.detect(scn_img,masked_data)

    # brisk:

    detector = cv2.BRISK_create(70, 2, .5)
    kp1 = detector.detect(scn_img, None)

    # for k in kp1:
    # 	x,y=k.pt
    # 	print(x, y)

    #==================================================>DESCRIPTOR_SCN

    # brisk:

    kp1, des1 = detector.compute(scn_img, kp1)

    savekeyPointsOnImage(scn_img, "input1.jpg", kp1, wr1, hr1)

    # img4 = cv2.drawKeypoints(scn_img, kp1,None, color=(0,255,255))
    # cv2.imwrite('input1.jpg', img4)

    return kp1, des1
Example #15
    def __init__(self, type='sift'):

        self.type = type
        #self.matcher_type = matcher_type

        if type == 'sift':
            self.detector = cv.xfeatures2d.SIFT_create()
            self.extractor = None
        elif type == 'surf':
            self.detector = cv.xfeatures2d.SURF_create(300)
            self.extractor = None
        elif type == 'orb':
            self.detector = cv.ORB_create(400)
            self.extractor = None
        elif type == 'akaze':
            self.detector = cv.AKAZE_create()
            self.extractor = None
        elif type == '_kaze':
            self.detector = cv.KAZE_create()
            self.extractor = None
        elif type == 'brisk':
            self.detector = cv.BRISK_create()
            self.extractor = None
        elif type == 'freak':
            self.detector = cv.xfeatures2d.StarDetector_create(20, 15)
            self.extractor = cv.xfeatures2d.FREAK_create()
            self.norm = cv.NORM_HAMMING
        elif type == 'brief':
            self.detector = cv.xfeatures2d.StarDetector_create(20, 15)
            self.extractor = cv.xfeatures2d.BriefDescriptorExtractor_create()
        else:
            # unrecognised type: leave detector/extractor unset
            self.detector = None
            self.extractor = None
            self.norm = None
Example #16
def create_detector(detect):
    detector = None
    #free ones first
    if detect == 'orb':
        detector = cv2.ORB_create(nfeatures=100000)
    if detect == 'akaze':
        detector = cv2.AKAZE_create()
    if detect == 'brisk':
        detector = cv2.BRISK_create()

    #proprietary second
    if detect == 'sift':
        detector = cv2.xfeatures2d.SIFT_create()
    if detect == 'surf':
        detector = cv2.xfeatures2d.SURF_create()
    if detect == 'freak':
        detector = cv2.xfeatures2d.FREAK_create()
    if detect == 'latch':
        detector = cv2.xfeatures2d.LATCH_create()
    if detect == 'daisy':
        detector = cv2.xfeatures2d.DAISY_create()
    if detect == 'lucid':
        detector = cv2.xfeatures2d.LUCID_create()

    if detector is None:
        print("Could not find a detector of that name")
        exit()

    print("Processing images using:", detect)
    return detector
Example #17
 def __init__(self, algorithm='BRISK'):
     if 'ORB' == algorithm:
         self.alg = cv2.ORB_create()
     elif 'AKAZE' == algorithm:
         self.alg = cv2.AKAZE_create()
     else:
         self.alg = cv2.BRISK_create()
Example #18
def live_demo():

    # initialises fastfeaturedetector to get initial points of interests
    fast = cv2.FastFeatureDetector_create(threshold=50, nonmaxSuppression=50)

    # initialises Binary Robust Invariant Scalable Keypoints (BRISK) for
    # keypoint descriptor analysis
    br = cv2.BRISK_create()

    # BruteForce matcher to compare matches
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

    from src.process import ImageProcess as ip
    from src.templates import make_templates

    # initialize the data of the templates
    ct_models = make_templates('CT', fast, br)
    start = time()
    while True:

        # captures the screen with ImageGrab in RGB.
        screen = np.array(ImageGrab.grab(bbox=(0, 27, 800, 627)))
        # converts it to BGR
        screen = cv2.cvtColor(screen, cv2.COLOR_RGB2BGR)

        process = ip(screen, ct_models, fast, br, bf, 'draw')
        print('Loop took: {:.5} seconds'.format(time() - start))

        cv2.imshow('AimAssistant', process.image)
        start = time()
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
Example #19
 def initialize(self, device):
     if self.type == "sift":
         self.model = cv.SIFT_create(nfeatures=self.n_keypoints)
     elif self.type == "orb":
         self.model = cv.ORB_create(nfeatures=self.n_keypoints)
     elif self.type == "brisk":
         self.model = cv.BRISK_create()
Example #20
def extract_local_features(images, img_size=300):
    brisk = cv2.BRISK_create(30)
    #sift = cv2.SIFT_create()
    #surf = cv2.SURF(400)
    labeled_featured_images = []
    print('[STATUS] extracting local features from', len(images), 'images')
    for i, image in enumerate(images):
        resized_arr = cv2.resize(image, (img_size, img_size))

        kpts, des = brisk.detectAndCompute(resized_arr, None)

        # create picture with detected kpts
        if (i == 0):
            print(len(kpts))
            print(brisk.descriptorSize())
            img = cv2.drawKeypoints(
                resized_arr,
                kpts,
                resized_arr,
                flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
            cv2.imwrite('brisk_keypoints.jpg', img)

        labeled_featured_images.append(des)

        if (i + 1) % 100 == 0:
            print('[STATUS]', i + 1, 'images processed')

    print('[STATUS] feature extraction finished for', i + 1, 'images')
    return labeled_featured_images
Example #21
def image_match3(image1, image2):
    MIN_MATCH_COUNT = 10

    img1 = cv2.imread(image1, 0)  # queryImage
    img2 = cv2.imread(image2, 0)  # trainImage

    # Initiate brisk detector

    #	orb=cv2.ORB_create()
    #	kp1,des1=orb.detectAndCompute(img1,None)
    #	kp2,des2=orb.detectAndCompute(img2,None)
    brisk = cv2.BRISK_create()
    kp1, des1 = brisk.detectAndCompute(img1, None)
    kp2, des2 = brisk.detectAndCompute(img2, None)

    #
    #FLANN_INDEX_KDTREE = 0
    #index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    #search_params = dict(checks = 50)
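    # binary BRISK descriptors need FLANN's LSH index; the KD-tree above only suits float descriptors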
    FLANN_INDEX_LSH = 6
    index_params = dict(
        algorithm=FLANN_INDEX_LSH,
        table_number=6,  #12
        key_size=12,  #20
        multi_probe_level=1)  #2
    search_params = dict(checks=100)

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(des1, des2, k=2)

    # store all the good matches as per Lowe's ratio test
    # (the LSH index may return fewer than k=2 neighbours, so guard the unpacking)
    good = []
    for pair in matches:
        if len(pair) == 2 and pair[0].distance < 0.7 * pair[1].distance:
            good.append(pair[0])

    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt
                              for m in good]).reshape(-1, 1, 2)

        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()

        h, w = img1.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)

        img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
        print("good")
        return True

    else:
        print("Not enough matches are found - %d/%d" %
              (len(good), MIN_MATCH_COUNT))
        matchesMask = None
        return False
Example #22
def test_image(path):
    cv_img = []
    for img in glob.glob(path+"*.jpg"):
        n = cv2.imread(img)
        cv_img.append(n)
    for i in range(len(cv_img)):
        cv_img[i]=createSkeletonImage.create_skeleton(cv_img[i])
        cv_img[i]=cv2.cvtColor(cv_img[i],cv2.COLOR_BGR2GRAY)
    shape=[]
    height = 0 
    width = 0
    for i in range(len(cv_img)):
        shape.append(cv_img[i].shape[:2])
        if shape[i][0] > height:
            height=shape[i][0]
        width+=shape[i][1]
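    # blank canvas as tall as the tallest image and wide enough to hold all images side by side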
    image_template=np.zeros((height,width), np.uint8)   
    temp=0
    for i in range(len(shape)):
        #print(type(shape[i][0]))
        image_template[:shape[i][0], temp:temp+shape[i][1]]=cv_img[i]
        temp += shape[i][1]  # advance the column offset by the width just written

    #print(image_template.shape)
    brisk = cv2.BRISK_create(thresh= 10, octaves= 4)
    keyPoints= brisk.detect(image_template, None)
    keyPoints, descriptors = brisk.compute(image_template, keyPoints)
    print(descriptors.shape)
    check_templates(descriptors)
Example #23
    def __init__(self, *args, **kwargs):
        """
        Constructor for CollectProcess class.
        Initializes the FAST, BRISK and brute-force matcher objects, loads the
        template data and creates the directory for collection.

        :param args:
        :param kwargs:
        """
        self.__fast = cv2.FastFeatureDetector_create(threshold=50,
                                                     nonmaxSuppression=50)
        self.__br = cv2.BRISK_create()
        self.__bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

        try:
            model_type = kwargs['model_type']
        except KeyError:
            model_type = 'CT'

        self.__templates = make_templates(model_type, self.__fast, self.__br)

        self.__modes = args
        self.__kwargs = kwargs

        self.__dp = DataPath()

        self.__file_index = self.__dp.get_index('collected')
        self.__dir = os.path.join(self.__dp.collected, str(self.__file_index))
        os.mkdir(self.__dir)

        self.__index = 0

        col_names = ('detections', 'intensity', 'fast_kp', 'process_time')
        self.__df = pd.DataFrame(columns=col_names)
Example #24
def descriptor(path):
    frame = cv2.imread(path)
    frame = cv2.resize(frame, (128, 128))

    convertedgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    convertedhsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    #cv2.imshow("hsv",convertedhsv)

    lowerBoundary = np.array([0, 40, 30], dtype="uint8")  #hsv ranges for skin
    upperBoundary = np.array([43, 255, 254], dtype="uint8")
    skinMask = cv2.inRange(convertedhsv, lowerBoundary, upperBoundary)
    #cv2.imshow("inrange",skinMask)
    skinMask = cv2.addWeighted(skinMask, 0.5, skinMask, 0.5, 0.0)
    #cv2.imshow("masked",skinMask)

    skinMask = cv2.medianBlur(skinMask, 5)  #remove noise
    skin = cv2.bitwise_and(convertedgray, convertedgray,
                           mask=skinMask)  #mask out hand

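    # detect and describe BRISK keypoints on the edge map of the masked hand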
    img2 = cv2.Canny(skin, 60, 60)
    brisk = cv2.BRISK_create()
    #brisk = cv2.xfeatures2d.SURF_create()
    img2 = cv2.resize(img2, (256, 256))
    kp, des = brisk.detectAndCompute(img2, None)
    img2 = cv2.drawKeypoints(img2, kp, None, (0, 0, 255), 4)
    #plt.imshow(img2),plt.show()
    #cv2.waitKey(0)
    cv2.destroyAllWindows()
    #print(len(des))
    return des
Example #25
def detect_kp(img, det_type='SIFT', verbose=False):
    """Detect keypoints."""
    if det_type == 'ORB':
        detector = cv2.ORB_create()
    elif det_type == 'BRISK':
        detector = cv2.BRISK_create()
    else:
        detector = cv2.SIFT_create()

    t0 = time.time()
    kp, des = detector.detectAndCompute(img, None)
    elapsed = time.time() - t0

    # Show detected
    if verbose:
        img_keypoints = np.empty((img.shape[0], img.shape[1], 3),
                                 dtype=np.uint8)
        cv2.drawKeypoints(img, kp, img_keypoints)

        cv2.namedWindow('img', cv2.WINDOW_NORMAL)
        cv2.imshow('img', img_keypoints)
        cv2.waitKey()
        cv2.destroyWindow('img')

    return kp, des, elapsed / len(kp)
Example #26
    def __init__(self, config='GFTT-BRIEF'):
        super().__init__()

        if config == 'GFTT-BRIEF':
            self.feature_detector = cv2.GFTTDetector_create(
                maxCorners=1000,
                minDistance=12.0,
                qualityLevel=0.001,
                useHarrisDetector=False)

            self.descriptor_extractor = cv2.xfeatures2d.BriefDescriptorExtractor_create(
                bytes=32, use_orientation=False)

        elif config == 'GFTT-BRISK':
            self.feature_detector = cv2.GFTTDetector_create(
                maxCorners=2000,
                minDistance=15.0,
                qualityLevel=0.01,
                useHarrisDetector=False)

            self.descriptor_extractor = cv2.BRISK_create()

        elif config == 'ORB-ORB':
            self.feature_detector = cv2.ORB_create(nfeatures=1000,
                                                   scaleFactor=1.2,
                                                   nlevels=1,
                                                   edgeThreshold=31)
            self.descriptor_extractor = self.feature_detector

        else:
            raise NotImplementedError

        self.descriptor_matcher = cv2.BFMatcher(cv2.NORM_HAMMING,
                                                crossCheck=False)

        self.matching_cell_size = 15  # pixels
        self.matching_neighborhood = 3
        self.matching_distance = 30

        self.frustum_near = 0.1  # meters
        self.frustum_far = 1000.0

        self.ground = True

        self.lc_max_inbetween_distance = 50
        self.lc_distance_threshold = 15
        self.lc_embedding_distance = 20.0

        self.view_image_width = 400
        self.view_image_height = 130
        self.view_camera_width = 0.75
        self.view_viewpoint_x = 0
        self.view_viewpoint_y = -500  # -10
        self.view_viewpoint_z = -100  # -0.1
        self.view_viewpoint_f = 2000

        self.line_detector = cv2.line_descriptor.BinaryDescriptor_createBinaryDescriptor()
        self.line_extractor = self.line_detector
        self.line_matching_distance = 30
Example #27
def set_descriptor(name):
    desc = None
    if name == "SIFT":
        desc = cv2.xfeatures2d.SIFT_create()
    elif name == "SURF":
        desc = cv2.xfeatures2d.SURF_create()
    elif name == "ORB":
        desc = cv2.ORB_create()
    elif name == "BRISK":
        desc = cv2.BRISK_create()
    elif name == "Brief":
        desc = cv2.xfeatures2d.BriefDescriptorExtractor_create()
    elif name == "FREAK":
        desc = cv2.xfeatures2d.FREAK_create()
    elif name == "LATCH":
        desc = cv2.xfeatures2d.LATCH_create()
    elif name == "AKAZE":
        desc = cv2.AKAZE_create()
    elif name == "KAZE":
        desc = cv2.KAZE_create()
    elif name == "DAISY":
        desc = cv2.xfeatures2d.DAISY_create()
    elif name == "LUCID":
        desc = cv2.xfeatures2d.LUCID_create()

    return desc
Example #28
File: draper.py Project: mingtotti/kaggle
def _imgStitcher(imgX, imgY, pcntDownsize=1.0, tryReverse=False):
    imgGrayX = _imgPreprocess(imgX, pcntDownsize)
    imgGrayY = _imgPreprocess(imgY, pcntDownsize)

    # Use BRISK to create key points in each image
    brisk = cv2.BRISK_create()
    kpX, desX = brisk.detectAndCompute(imgGrayX, None)
    kpY, desY = brisk.detectAndCompute(imgGrayY, None)

    # Use a Hamming-distance BruteForce matcher: BRISK descriptors are binary
    dm = cv2.DescriptorMatcher_create("BruteForce-Hamming")
    matches = dm.knnMatch(desX, desY, 2)
    filteredMatches = []
    for m in matches:
        if len(m) == 2 and m[0].distance < m[1].distance * 0.75:
            filteredMatches.append((m[0].trainIdx, m[0].queryIdx))

    kpX = numpy.float32([kpX[m[1]].pt for m in filteredMatches])
    kpX = kpX.reshape(-1, 1, 2)
    kpY = numpy.float32([kpY[m[0]].pt for m in filteredMatches])
    kpY = kpY.reshape(-1, 1, 2)

    # Calculate homography matrix
    H, mask = cv2.findHomography(kpY, kpX, cv2.RANSAC, 4.0)

    if H is None and not tryReverse:
        # Try again with 100% scaling
        H = _imgStitcher(imgX, imgY, 1.0, True)
    if H is None:
        H = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]

    return (H)
Example #29
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv.xfeatures2d.SIFT_create()
        norm = cv.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv.xfeatures2d.SURF_create(800)
        norm = cv.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv.ORB_create(400)
        norm = cv.NORM_HAMMING
    elif chunks[0] == 'akaze':
        detector = cv.AKAZE_create()
        norm = cv.NORM_HAMMING
    elif chunks[0] == 'brisk':
        detector = cv.BRISK_create()
        norm = cv.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv.NORM_L2:
            flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        else:
            flann_params = dict(algorithm=FLANN_INDEX_LSH,
                                table_number=6,        # 12
                                key_size=12,           # 20
                                multi_probe_level=1)   # 2
        matcher = cv.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv.BFMatcher(norm)
    return detector, matcher
Example #30
def brisk_thread():
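    # relies on the module-level images 'gray' and 'img_brisk' defined elsewhere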
    brisk = cv2.BRISK_create()
    (kps5, descs5) = brisk.detectAndCompute(gray, None)
    cv2.drawKeypoints(gray,
                      kps5,
                      img_brisk,
                      flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)