Example #1
def calculate_point(kps_left, sco_left, des_left, kps_right, sco_right, des_right):
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des_left, des_right, k=2)
    
    # matcher = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_FLANNBASED)
    # matches = matcher.knnMatch(des_left, des_right, 2)
    
    
    goodMatch = []
    locations_1_to_use = []
    locations_2_to_use = []
    
    # Filter the matched pairs
    min_dist = 1000
    max_dist = 0
    disdif_avg = 0
    # Average gap between second-best and best match distances
    for m, n in matches:
        disdif_avg += n.distance - m.distance
    disdif_avg = disdif_avg / len(matches)
    # print('disdif_avg:', disdif_avg)
    for m, n in matches:
        # Adaptive threshold based on the average distance gap
        if n.distance > m.distance + 1*disdif_avg:
        # if m.distance < 0.9 * n.distance: 
            goodMatch.append(m)
            p2 = cv2.KeyPoint(kps_right[m.trainIdx][0],  kps_right[m.trainIdx][1],  1)
            p1 = cv2.KeyPoint(kps_left[m.queryIdx][0], kps_left[m.queryIdx][1], 1)
            locations_1_to_use.append([p1.pt[0], p1.pt[1]])
            locations_2_to_use.append([p2.pt[0], p2.pt[1]])
    #goodMatch = sorted(goodMatch, key=lambda x: x.distance)
    # print('match num is %d' % len(goodMatch))
    locations_1_to_use = np.array(locations_1_to_use)
    locations_2_to_use = np.array(locations_2_to_use)
    
    # Perform geometric verification using RANSAC.
    _, inliers = measure.ransac((locations_1_to_use, locations_2_to_use),
                              transform.AffineTransform,
                              min_samples=3,
                              residual_threshold=_RESIDUAL_THRESHOLD,
                              max_trials=1000)
    
    print('Found %d inliers' % sum(inliers))
    
    inlier_idxs = np.nonzero(inliers)[0]
    # Final match result
    matches = np.column_stack((inlier_idxs, inlier_idxs))
    # print('whole time is %6.3f' % (time.perf_counter() - start0))
    
    # Visualize correspondences, and save to file.
    # 1. Draw the match lines
    plt.rcParams['savefig.dpi'] = 100  # saved-image DPI
    plt.rcParams['figure.dpi'] = 100  # display DPI
    plt.rcParams['figure.figsize'] = (4.0, 3.0)  # figure size in inches
    _, ax = plt.subplots()
    plotmatch.plot_matches(
        ax,
        image1,
        image2,
        locations_1_to_use,
        locations_2_to_use,
        np.column_stack((inlier_idxs, inlier_idxs)),
        plot_matche_points = False,
        matchline = True,
        matchlinewidth = 0.5)
    ax.axis('off')
    ax.set_title('')
    plt.show()
    # print('inlier_idxs:', len(inlier_idxs))
    res = locations_2_to_use[inlier_idxs] - locations_1_to_use[inlier_idxs]
    return locations_1_to_use[inlier_idxs], locations_2_to_use[inlier_idxs]
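A hypothetical driver for calculate_point might look like the sketch below. It is an assumption, not part of the original example: the function relies on module-level globals (image1, image2, _RESIDUAL_THRESHOLD, skimage's measure/transform, and a project-local plotmatch module), and the keypoint arrays are plain (x, y) coordinates indexed by queryIdx/trainIdx.

import cv2
import numpy as np
from skimage import measure, transform  # measure.ransac, transform.AffineTransform

_RESIDUAL_THRESHOLD = 20  # assumed RANSAC residual threshold, in pixels

image1 = cv2.imread('left.png', cv2.IMREAD_GRAYSCALE)   # hypothetical input pair
image2 = cv2.imread('right.png', cv2.IMREAD_GRAYSCALE)

sift = cv2.SIFT_create()  # OpenCV >= 4.4
kps1, des1 = sift.detectAndCompute(image1, None)
kps2, des2 = sift.detectAndCompute(image2, None)

pts1 = np.float32([kp.pt for kp in kps1])  # (x, y) rows, as calculate_point expects
pts2 = np.float32([kp.pt for kp in kps2])
loc1, loc2 = calculate_point(pts1, None, des1, pts2, None, des2)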
Example #2
 def __init__(self, sift_kp_size=16, concat=True, kps_idx=numpy.arange(4, 31, 8)):
     self.concat = concat
     self.sift = cv2.SIFT()
     coordinates = list(itertools.product(kps_idx, kps_idx))
     self.kps = [cv2.KeyPoint(i, j, sift_kp_size) for (i, j) in coordinates]
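Note that cv2.SIFT() is the OpenCV 2.x constructor. On later builds the factory function moved; a version-tolerant sketch (exact availability depends on how OpenCV was built):

import cv2

def make_sift():
    if hasattr(cv2, 'SIFT_create'):        # OpenCV >= 4.4, SIFT back in the main module
        return cv2.SIFT_create()
    if hasattr(cv2, 'xfeatures2d'):        # OpenCV 3.x / early 4.x with contrib
        return cv2.xfeatures2d.SIFT_create()
    return cv2.SIFT()                      # OpenCV 2.x, as in the example above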
Example #3
 def to_cv2_kp(kp):
     # assert kp = [<row>, <col>, <ori>, <octave_ind>, <layer_ind>]
     ratio = get_size_ratio_by_octave(kp[3])
     scale = get_scale_by_ind(kp[3], kp[4])
     return cv2.KeyPoint(kp[1] / ratio, kp[0] / ratio, 10,
                         kp[2] / PI * 180)
Example #4
def load_graffiti_help(graffiti_dir, N_imgs, N_data, thresh):
    # Load graffiti dataset images
    n = N_data
    N_points = 10 * n
    data_files = [
        os.path.join(graffiti_dir, 'img{}.png'.format(i + 1))
        for i in range(N_imgs)
    ]
    imgs = [imageio.imread(fname)[:, :, :3] for fname in data_files]
    # Build Homographies
    H = [np.eye(3)]
    for i in range(1, N_imgs):
        h = []
        with open(os.path.join(graffiti_dir, 'H1to{}p'.format(i + 1)),
                  'r') as f:
            for l in f:
                h.append([float(x) for x in l.split()])
        H.append(np.array(h))
    # Compute sift descriptors for first image
    sift = cv2.xfeatures2d.SIFT_create(N_points)
    keypoints0, descs0 = sift.detectAndCompute(imgs[0], None)
    # Select features with valid x,y coordinates not too close to others
    descs0 = [d / np.linalg.norm(d) for d in descs0[:N_points]]
    xy0 = np.stack([[k.pt[0], k.pt[1], 1] for k in keypoints0], axis=1)
    ids1 = distint_locs(xy0, thresh)
    xy0 = np.stack([xy0[:, ii] for ii in ids1], axis=1)
    sel = xy0[0, :] < np.inf  # Should all be true
    for j in range(1, N_imgs):
        xyj = h_apply(H[j], xy0)
        and_lists = [
            xyj[0, :] > 0,
            xyj[1, :] > 0,
            xyj[0, :] < imgs[j].shape[1],
            xyj[1, :] < imgs[j].shape[0],
        ]
        sel = list(functools.reduce(np.logical_and, and_lists, sel))
    # Sort features by their selectiveness
    descs0 = np.stack([descs0[ids1[i]] for i in range(len(ids1)) if sel[i]])
    keypoints0 = [keypoints0[ids1[i]] for i in range(len(ids1)) if sel[i]]
    Sim = np.dot(descs0, descs0.T)
    SimSortList = [np.sort(s)[::-1] for s in Sim]
    vec = (np.arange(len(descs0))[::-1])**(2)
    best_idxs = sorted(np.arange(len(descs0)),
                       key=lambda x: SimSortList[x] @ vec)
    # Select the features and pull out their features
    xy_ = xy0[:, sel]
    xy = [xy_[:, best_idxs[:n]]]
    descs = [descs0[best_idxs[:n], :]]
    keypoints = [[keypoints0[i] for i in best_idxs[:n]]]
    # Compute the features for their location in the other images
    for i in range(1, N_imgs):
        xy.append(h_apply(H[i], xy[0]))
        kpts = []
        for j in range(xy[0].shape[1]):
            x0, y0 = xy[0][0, j], xy[0][1, j]
            size_new, angle_new = get_size_angle(x0, y0, H[i], keypoints0[j])
            octv = size_to_octave_desc(size_new)
            kpts.append(
                cv2.KeyPoint(x=xy[i][0, j],
                             y=xy[i][1, j],
                             _size=size_new,
                             _angle=angle_new,
                             _response=keypoints0[j].response,
                             _octave=octv,
                             _class_id=keypoints0[j].class_id))
        keypoints.append(kpts)
    # Compute the full descriptors
    # These include the feature descriptor, x, y, log scale, and orientation
    FullDescs = []
    for kpts, img in zip(keypoints, imgs):
        h, w, c = img.shape  # use the current image, not the stale index i from the previous loop
        _, descs = sift.compute(img, kpts)
        d_ = [(d / np.linalg.norm(d)) for d in descs]
        dkp_ = [
            np.array(
                list(d_[i]) + [(k.pt[0] - w / 2) / w, (k.pt[1] - h / 2) / h,
                               np.log(k.size),
                               np.deg2rad(k.angle)])
            for i, k in enumerate(kpts)
        ]
        FullDescs.append(dkp_)
    return np.stack(FullDescs)
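load_graffiti_help leans on an h_apply helper that is not shown here. A plausible sketch, inferred from the call sites (apply a 3x3 homography to homogeneous points and renormalize); this is an assumption, not the project's actual code:

import numpy as np

def h_apply(H, xy):
    # H: 3x3 homography; xy: 3xN homogeneous points (or a single 3-vector).
    p = np.dot(H, np.asarray(xy, dtype=float))
    return p / p[2]  # renormalize so the last coordinate is 1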
Example #5
        x_0, y_0, _ = tuple(h_apply(H[0, i], (x0 + s * dx_, y0 + s * dy_, 1)))
        x_1, y_1, _ = tuple(h_apply(H[0, i], (x0 - s * dy_, y0 + s * dx_, 1)))
        x_2, y_2, _ = tuple(h_apply(H[0, i], (x0 + s * dy_, y0 - s * dx_, 1)))
        x_3, y_3, _ = tuple(h_apply(H[0, i], (x0 - s * dx_, y0 - s * dy_, 1)))
        s_new = np.mean([
            np.linalg.norm((x_0 - x_3, y_0 - y_3)) / 2,
            np.linalg.norm(((x_1 - x_2, y_1 - y_2))) / 2
        ])
        octv = size_to_octave_desc(s_new)
        angle_new = np.arctan2(-y_0 + y_3, x_0 - x_3)
        angle_new = np.rad2deg(angle_new + 2 * np.pi * (angle_new < 0))
        kpts.append(
            cv2.KeyPoint(x=x,
                         y=y,
                         _size=s_new,
                         _angle=angle_new,
                         _response=keypoints0[j].response,
                         _octave=octv,
                         _class_id=keypoints0[j].class_id))
    keypoints.append(kpts)

descriptors = []
Descs = []
FullDesc = []
for kpts, img in zip(keypoints, imgs):
    h, w, c = img.shape  # use the current image, not the stale index i
    _, descs = sift.compute(img, kpts)
    d_ = [(d / np.linalg.norm(d)) for d in descs]
    descriptors.append(d_)
    dkp_ = [
        np.array(
Example #6
def main(config):

    # Build Networks
    tf.reset_default_graph()

    photo_ph = tf.placeholder(tf.float32, [1, None, None, 1]) # input grayscale image, normalized by 0~1
    is_training = tf.constant(False) # Always False in testing

    ops = build_networks(config, photo_ph, is_training)

    tfconfig = tf.ConfigProto()
    tfconfig.gpu_options.allow_growth = True 
    sess = tf.Session(config=tfconfig)
    sess.run(tf.global_variables_initializer())

    # load model
    saver = tf.train.Saver()
    print('Load trained models...')

    if os.path.isdir(config.model):
        checkpoint = tf.train.latest_checkpoint(config.model)
        model_dir = config.model
    else:
        checkpoint = config.model
        model_dir = os.path.dirname(config.model)


    if checkpoint is not None:
        print('Checkpoint', os.path.basename(checkpoint))
        print("[{}] Resuming...".format(time.asctime()))
        saver.restore(sess, checkpoint)
    else:
        raise ValueError('Cannot load model from {}'.format(model_dir))    
    print('Done.')

    ##########################################################################

    new_size = (config.w, config.h)

    # setup output dir
    res_dir = os.path.join('res/lfnet/', config.trials)
    if not os.path.exists(res_dir):
        os.makedirs(res_dir)

    # write human readable logs
    f = open(os.path.join(res_dir, 'log.txt'), 'w')
    f.write('lfnet\n')
    f.write('data: %s\n'%cst.DATA)
    f.write('thresh_overlap: %d\n'%cst.THRESH_OVERLAP)
    f.write('thresh_desc: %d\n'%cst.THRESH_DESC)

    
    # feature matcher handler for visualization
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks=50)
    matcher = cv2.FlannBasedMatcher(index_params,search_params)
    norm = 'L2'

    
    global_start_time = time.time()
    for scene_name in cst.SCENE_LIST:

        print('************ %s ************'%scene_name)
        f.write('************ %s ************\n'%scene_name)
        scene_dir = '%s/%s/'%(cst.DATA_DIR, scene_name)
        pose_fn = '%s/images.txt'%(scene_dir)
        img_list = [ l for l in sorted(os.listdir(scene_dir)) if l[-3:]=='png']
        img_num = len(img_list)

        # intrinsic params (same for all img)
        camera_fn   = os.path.join(scene_dir, '0000.png.camera')
        #print('camera_fn: %s'%camera_fn)
        K, T = bench_tools.load_camera(camera_fn)
        fx = K[0,0]
        fy = K[1,1]
        cx = K[0,2]
        cy = K[1,2]   
        K_inv = np.linalg.inv(K)

        img0_root_fn = '%04d'%0
        img0_fn = os.path.join(scene_dir, '%s.png'%img0_root_fn)
        img0 = cv2.imread(img0_fn)
        old_size = (img0.shape[1], img0.shape[0])
        img0 = cv2.resize(img0, new_size, interpolation=cv2.INTER_AREA)
         
        sx2big, sy2big = 1.0*old_size[0]/new_size[0], 1.0*old_size[1]/new_size[1] 
        sx2small, sy2small = 1.0*new_size[0]/old_size[0], 1.0*new_size[1]/old_size[1]
        print('sx2big: %.3f - sy2big: %.3f'%(sx2big, sy2big))
        print('sx2small: %.3f - sy2small: %.3f'%(sx2small, sy2small))
        
        # get colmap depth map
        depth0_fn    = '%s/%s.png.photometric.bin'%(scene_dir, img0_root_fn)
        depth0_map = bench_tools.read_array(depth0_fn)
        depth0_map = cv2.resize(depth0_map, old_size, interpolation=cv2.INTER_CUBIC)
        print('depth0_map.shape', depth0_map.shape)
        
        # Get camera pose
        kp_all_l = [l.split("\n")[0] for l in open(pose_fn).readlines()][4:]
        header0, kp0_l = [],[]
        for i, l in enumerate(kp_all_l):
            if l.split(" ")[-1] == ('%s.png'%img0_root_fn):
                header0 = l
                kp0_l = kp_all_l[i+1]
        kp0_l = kp0_l.split(" ")
        kp0_l = np.reshape(np.array(kp0_l), [-1,3]).astype(float)
        q0 = np.array(header0.split(" ")[1:5]).astype(float)
        t0 = np.array(header0.split(" ")[5:8]).astype(float)
        R0 = bench_tools.qvec2rotmat(q0)

        img0 = cv2.cvtColor(img0, cv2.COLOR_RGB2GRAY)
        img0_bw = img0.copy()
        img0 = img0[None,...,None].astype(np.float32) / 255.0 # normalize 0-1
        assert img0.ndim == 4 # [1,H,W,1]
        # Dump keypoint locations and their features
        outs = sess.run( {'kpts': ops['kpts'],'feats': ops['feats']}, 
            feed_dict= {photo_ph: img0})
        pts0 = outs['kpts'].T
        des0 = outs['feats']
        kp0 = []
        for pt in pts0.T:
            kp = cv2.KeyPoint(x=pt[0],y=pt[1], _size=2,
                _angle=0, _response=0, _octave=0, _class_id=0)
            kp0.append(kp)
        
        kp_on_img0 = np.tile(np.expand_dims(img0_bw,2), (1,1,3))
        for i,kp in enumerate(kp0):
            pt = (int(round(kp.pt[0])), int(round(kp.pt[1])))
            cv2.circle(kp_on_img0, pt, 1, (0, 255, 0), -1, lineType=16)

                
        for img_id in range(1,img_num):
            # get img
            print('** %04d ** '%img_id)
            f.write('** %04d **\n'%img_id)
            img1_root_fn = '%04d'%img_id
            img1_fn = os.path.join(scene_dir, '%s.png'%img1_root_fn)
            img1 = cv2.imread(img1_fn)
            img1 = cv2.resize(img1, new_size, interpolation=cv2.INTER_AREA)
            
            # get depth
            depth1_fn    = '%s/%s.png.photometric.bin'%(scene_dir, img1_root_fn)
            depth1_map = bench_tools.read_array(depth1_fn)
            depth1_map = cv2.resize(depth1_map, old_size, interpolation=cv2.INTER_CUBIC)
            #print('depth1_map.shape', depth1_map.shape)
            #raw_input('wait')
            
            # get camera pose
            header1, kp1_l = [],[] # camera pose, pixels list
            for i, l in enumerate(kp_all_l):
                if l.split(" ")[-1] == ('%s.png'%img1_root_fn):
                    header1 = l
                    kp1_l = kp_all_l[i+1]
            kp1_l = kp1_l.split(" ")
            kp1_l = np.reshape(np.array(kp1_l), [-1,3]).astype(float)
            q1 = np.array(header1.split(" ")[1:5]).astype(float)
            t1 = np.array(header1.split(" ")[5:8]).astype(float)
            R1 = bench_tools.qvec2rotmat(q1)
            
            img1 = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
            img1_bw = img1.copy()
            img1 = img1[None,...,None].astype(np.float32) / 255.0 # normalize 0-1
            assert img1.ndim == 4 # [1,H,W,1]
            # Dump keypoint locations and their features
            outs = sess.run( {'kpts': ops['kpts'],'feats': ops['feats']}, 
                    feed_dict= {photo_ph: img1})
            pts1 = outs['kpts'].T
            des1 = outs['feats']
            kp1 = []
            for pt in pts1.T:
                kp = cv2.KeyPoint(x=pt[0],y=pt[1], _size=2,
                    _angle=0, _response=0, _octave=0, _class_id=0)
                kp1.append(kp)

            kp_on_img1 = np.tile(np.expand_dims(img1_bw,2), (1,1,3))
            for i,kp in enumerate(kp1):
                pt = (int(round(kp.pt[0])), int(round(kp.pt[1])))
                cv2.circle(kp_on_img1, pt, 1, (0, 255, 0), -1, lineType=16)

            # scale kp to original size (i.e. the big img) for projection
            # beware of floating-point rounding when rescaling coordinates
            h_pts0 = np.vstack([[sx2big*kp.pt[0], sy2big*kp.pt[1], 1] for kp in kp0])
            h_pts1 = np.vstack([[sx2big*kp.pt[0], sy2big*kp.pt[1], 1] for kp in kp1])
        
            rep, N1, N2, M = bench_tools.rep(old_size, h_pts0, h_pts1, K, depth0_map,
                    depth1_map, R0, t0, R1, t1, cst.THRESH_OVERLAP*sx2big)
            print('rep: %.3f - N1: %d - N2: %d - M: %d'%(rep,N1,N2,M))
            f.write('rep:%.3f - N1:%d - N2:%d - M:%d\n' %(rep,N1,N2,M))
            
            ms, N1, N2, M_len, M_d_len, inter = bench_tools.ms(old_size, des0,
                    des1, h_pts0, h_pts1, K, depth0_map, depth1_map, R0, t0, R1,
                    t1, cst.THRESH_OVERLAP*sx2big, cst.THRESH_DESC, norm='L2')
            print('ms:%.3f - N1:%d - N2:%d - M:%d - M_d:%d - inter:%d'
                    %(ms,N1,N2,M_len, M_d_len, inter))
            f.write('ms:%.3f - N1:%d - N2:%d - M:%d - M_d:%d - inter:%d\n'
                %(ms, N1, N2, M_len, M_d_len, inter))
 
            if cst.DEBUG:
                # match sift
                good = []
                matches = matcher.knnMatch(des0, des1,k=2)
                for i,(m,n) in enumerate(matches):
                    if m.distance < 0.8*n.distance:
                        good.append(m)
                match_des_img = cv2.drawMatches(img0_bw, kp0, img1_bw, kp1,
                        good, None, flags=2)
                cv2.imshow('match_des', match_des_img)
                cv2.imshow('kp_on_img0', np.hstack((kp_on_img0,
                    kp_on_img1)))
                cv2.waitKey(0)

    f.close()
    print('Done.')
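Several snippets on this page, including this one, build keypoints with the pre-4.5.3 Python keyword names (_size, _angle, _response, _octave, _class_id), which recent OpenCV 4.x wheels renamed to size, angle, and so on. Positional arguments work in both generations of the binding; a small compatibility shim, as a sketch:

def make_keypoint(x, y, size, angle=0, response=0, octave=0, class_id=0):
    # Positional arguments are accepted across OpenCV 2.x-4.x, unlike the keyword names.
    return cv2.KeyPoint(float(x), float(y), float(size), float(angle),
                        float(response), int(octave), int(class_id))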
Example #7
import cv2 as cv
import numpy as np

IMAGE_PATH1 = './images/a.jpg'
IMAGE_PATH2 = './images/c.jpg'

img1 = cv.imread(IMAGE_PATH1)
img2 = cv.imread(IMAGE_PATH2)

key1 = cv.KeyPoint(0, 0, 1)
key2 = cv.KeyPoint(0, 0, 1)

img_matches = np.empty(
    (max(img1.shape[0], img2.shape[0]), img1.shape[1] + img2.shape[1], 3),
    dtype=np.uint8)

print(img2[199, 132])
print(img1.shape)
print(img2.shape)
print(img_matches.shape)
img_matches[:img1.shape[0], :img1.shape[1]] = img1
img_matches[:img2.shape[0], img1.shape[1]:img1.shape[1] + img2.shape[1]] = img2

cv.circle(img_matches, (1, 100), 10, (0, 0, 255))
# offset x by the first image's width (shape[1]), not its height (shape[0])
cv.circle(img_matches, (img1.shape[1] + 10, 10), 10, (0, 0, 255))
cv.line(img_matches, (10, 10), (img1.shape[1] + 10, 10), (0, 0, 255))
cv.imshow("test", img_matches)
cv.waitKey()
Example #8
File: slam.py Project: tw255/Vslam
mx,my,mz = [],[],[]

frames = []
idxs = []
while(cap.isOpened()):
    
    #if fc == 0:
    ret, frame = cap.read()
    #frames.append(frame)
    frame0 = frame
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = np.float32(gray)
    orb = cv2.ORB_create()
    pts = cv2.goodFeaturesToTrack(gray, 3000, 0.01, 7)

    kps = [cv2.KeyPoint(x=pt[0][0], y=pt[0][1], _size=20) for pt in pts]
    kps, des = orb.compute(frame, kps)

    #for pt in pts:
     #   x,y = pt.ravel()
      #  cv2.circle(frame,(x,y), 3, (0,255,0), -1)
    
    #if fc == 1:
    ret, frame = cap.read()
    bat = []
    idx1,idx2 = [],[]
    frame1 = frame
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    if prev is not None:
        matches = bf.knnMatch(des, prev['des'], k =2)
        for m,n in matches:
Example #9
#! /usr/bin/env python
from optparse import OptionParser
import json
from pprint import pprint
import cv2
import re
import numpy as np
import pickle
import random
import csv
import os
class SaveClass:
    def __init__(self, votes, keypoints, descriptors, bodypart, hessianThreshold, nOctaves, nOctaveLayers):
        self.votes = votes
        self.keypoints = keypoints
        self.descriptors = descriptors
        self.bodypart = bodypart
        self.hessianThreshold = hessianThreshold
        self.nOctaves = nOctaves
        self.nOctaveLayers = nOctaveLayers
if __name__ == '__main__':
    parser = OptionParser()
    # Read the options
    parser.add_option("", "--train-annotation", dest="train_annotation_file", default="", help="frame level training annotation JSON file")
    parser.add_option("", "--train-annotation-list", dest="train_annotation_list", default="",help="list of frame level training annotation JSON files")
    parser.add_option("", "--project-path", dest="project_dir", default="", help="path containing data directory")
    parser.add_option("", "--mh-neighborhood", dest="mh_neighborhood", type="int", default=10,help="distance from mouth hook for a keyppoint to be considered relevant for training")
    parser.add_option("", "--positive-training-datafile", dest="train_data_pos", help="File to save the information about the positive training data")
    parser.add_option("", "--negative-training-datafile", dest="train_data_neg", help="File to save the information about the negative training data")
    parser.add_option("", "--display", dest="display_level", default=0, type="int",help="display intermediate and final results visually, level 5 for all, level 1 for final, level 0 for none")
Example #10
    count = 0
    for kp in keypoints:
        # print kp.pt, kp.size
        print('kp:{}, size:{}'.format(kp.pt, kp.size))
        x = int(kp.pt[0])
        y = int(kp.pt[1])
        size = int(kp.size) >> 1  # half the keypoint size
        # extract angle in keypoint subwindow (numpy indexes [row, col], i.e. [y, x])
        phase_kp = phase[y - size:y + size, x - size:x + size]

        # extract gradient magnitude in keypoint subwindow
        magnitude_kp = magnitude[y - size:y + size, x - size:x + size]

        # build a histogram of angles in the subwindow, but only where the gradient
        # magnitude is non-zero: pixels with zero magnitude have no well-defined
        # orientation and would only add noise to the histogram
        non_zero_angle = phase_kp[magnitude_kp != 0]

        (hist, bins) = np.histogram(non_zero_angle,
                                    bins=[0, 1, 2, 3, 4, 5, 6, 7, 8])
        plot_histogram(hist, bins)
        descr[count] = hist
        count += 1  # was missing: without it every keypoint overwrote descr[0]

    return descr


keypoints = [cv2.KeyPoint(15, 15, 11)]

# test for all test images
test = cv2.imread('./images/hog_test/circle.jpg')
descriptor = compute_simple_hog(test, keypoints)
Example #11
M, mask = cv.findHomography(corners1, corners2, cv.RANSAC, 5.0)
# matchesMask = mask.ravel().tolist()

# Find the Fundamental Matrix
F, mask = cv.findFundamentalMat(corners1, corners2, cv.FM_RANSAC)

# TODO: This has so much noise it returns a rank 3.  Refine F.

matchesMask = mask.ravel().tolist()

# Convert corners to keypoints
x1 = corners1[:, :, 0]
y1 = corners1[:, :, 1]
x2 = corners2[:, :, 0]
y2 = corners2[:, :, 1]
kp1 = [cv.KeyPoint(float(x1[idx]), float(y1[idx]), 1) for idx in range(len(x1))]
kp2 = [cv.KeyPoint(float(x2[idx]), float(y2[idx]), 1) for idx in range(len(x2))]

# Visualize corners in image
corners1 = np.int0(corners1)
corners2 = np.int0(corners2)

for i in corners1:
    x, y = i.ravel()
    cv.circle(img1, (x, y), 3, 255, -1)

for j in corners2:
    u, v = j.ravel()
    cv.circle(img2, (u, v), 3, 255, -1)

fig1 = plt.figure(figsize=(18, 16), dpi=80, facecolor='w', edgecolor='k')
Example #12
 def to_cv2_kp(self, kp):
     # kp is like [batch_idx, y, x, channel]
     return cv2.KeyPoint(int(kp[2]/self.sw), int(kp[1]/self.sh), 0)
Example #13
def to_cv2_kp(kp):
    # kp is like [batch_idx, y, x, channel]
    return cv2.KeyPoint(kp[2], kp[1], 0)
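Both converters map rows shaped [batch_idx, y, x, channel] to cv2.KeyPoint(x, y, size), swapping the array's (y, x) order into image coordinates. A batch version, sketched with hypothetical stride parameters sw/sh like those in Example #12:

def to_cv2_kps(rows, sw=1.0, sh=1.0):
    # rows: iterable of [batch_idx, y, x, channel]; sw/sh undo feature-map striding.
    return [cv2.KeyPoint(float(r[2]) / sw, float(r[1]) / sh, 1) for r in rows]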
Example #14
    keypoints_1 = keypoints_1[idx]

    keypoints_2 = sift.detect_image(img2)  # detect features
    idx = np.where(np.sum(keypoints_2.desc, 1) !=
                   0)  # remove zero descriptors from keypoints
    keypoints_2 = keypoints_2[idx]

    # feature matching
    s_time = time.time()
    matches = sift.match_images(keypoints_1,
                                keypoints_2)  # matching between 2 keypoints
    print("Feature Matching took took {} sec".format(time.time() - s_time))

    # transform into cv2 keypoints
    cv_kp1 = [
        cv2.KeyPoint(x=keypoints_1.x[i], y=keypoints_1.y[i], _size=20)
        for i in range(len(keypoints_1.x))
    ]
    cv_kp2 = [
        cv2.KeyPoint(x=keypoints_2.x[i], y=keypoints_2.y[i], _size=20)
        for i in range(len(keypoints_2.x))
    ]

    # draw the N first matches
    N = 50
    draw_matches(img1, cv_kp1, img2, cv_kp2, matches[:N])

    # draw the features on img 1
    # img3 = np.array([])
    # img3 = cv2.drawKeypoints(img2, cv_kp2, img3, color=(0, 0, 255))
    # plt.imshow(img3), plt.show()
Example #15
'''
PROCESS FRAME
'''
global vslog
vslog = logging.getLogger('VSLAM')
logging.basicConfig(level=logging.INFO, format='%(levelname)8s  %(message)s')
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)


def _pickle_keypoints(point):
    return cv2.KeyPoint, (*point.pt, point.size, point.angle, point.response,
                          point.octave, point.class_id)


copyreg.pickle(cv2.KeyPoint().__class__, _pickle_keypoints)


def writer(imgnames, masknames, config_dict, queue):
    '''
    This function creates Frame objects which read images from disk and detect features
    and feature descriptors using settings in the config file. It then puts each object into
    a multi-process queue.
    This function is designed to run in a separate process (its own heap) and thus takes
    everything it needs in the form of parameters and doesn't rely on global variables.
    '''
    #TILE_KP = config_dict['use_tiling_non_max_supression']
    USE_MASKS = config_dict['use_masks']
    USE_CLAHE = config_dict['use_clahe']
    FEATURE_DETECTOR_TYPE = config_dict['feature_detector_type']
    FEATURE_DESCRIPTOR_TYPE = config_dict['feature_descriptor_type']
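With the copyreg registration at the top of this example, cv2.KeyPoint objects survive a pickle round trip (they are not picklable by default). A quick sanity check:

import pickle

kp = cv2.KeyPoint(10.0, 20.0, 5.0)
kp2 = pickle.loads(pickle.dumps([kp]))[0]
assert kp2.pt == kp.pt and kp2.size == kp.size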
Example #16
    return canvas

#
# Load Images
# im1 = cv2.imread( '../image/church1.jpg', 0)
# im2 = cv2.imread( '../image/church2.jpg', 0)

im1 = cv2.imread( '../image/a.png')
im2 = cv2.imread( '../image/b.png')
# sift = cv2.xfeatures2d.SIFT_create()
sift = cv2.xfeatures2d.DAISY_create()


step_size = 5
kp = [cv2.KeyPoint(x, y, step_size) for y in range(0, im1.shape[0], step_size)
                                    for x in range(0, im1.shape[1], step_size)]

im1_keypts = cv2.drawKeypoints(im1,kp, None)
im2_keypts = cv2.drawKeypoints(im2,kp, None)
cv2.imshow( 'im1_keypts', im1_keypts)
cv2.imshow( 'im2_keypts', im2_keypts)
cv2.waitKey(0)

startTime = time.time()
dense_feat1 = sift.compute(im1, kp)
dense_feat2 = sift.compute(im2, kp)
print('time taken for DAISY in sec : ', time.time() - startTime)


#
Example #17
def get_scale_hist(train_data_dir, param):
    kp_file_suffix = "-kp-minsc-2.0.h5"

    # Check if scale histogram file exists
    hist_file_name = train_data_dir + 'scales-histogram-minsc-' + str(
        param.dataset.fMinKpSize) + '.h5'
    print(hist_file_name)
    if not os.path.exists(hist_file_name):

        # read all positive keypoint scales
        list_png_file = []
        # for files in os.listdir(train_data_dir):
        #     if files.endswith(".png"):
        #         list_png_file = list_png_file + [files]
        list_png_file = list(paths.list_images(train_data_dir))

        # all_scales = np.array(dtype=float)
        all_scales = []
        for png_file in list_png_file:
            # print(png_file)
            # kp_file_name = train_data_dir + png_file.replace('.png', '_P.mat')
            kp_file_name = png_file.replace('.png', '_P.mat')
            kp_file_name = kp_file_name.replace('.jpg', '_P.mat')  # chain, don't overwrite
            # print(kp_file_name)
            # if a MATLAB keypoint file exists:
            if os.path.exists(kp_file_name):
                print("matlab kp file exists")
                cur_pos_kp = scipy.io.loadmat(kp_file_name)['TFeatures']
                cur_pos_kp = np.asarray(cur_pos_kp, dtype='float')
                all_scales += [cur_pos_kp[5, :].flatten()]
            # if not
            else:
                # print("trying to read ascii kp file...")
                # look for a Lowe's ASCII keypoint file
                # ascci_kp_file = os.path.splitext(png_file)[0]+".sift_bak"
                # print(ascci_kp_file)
                # cur_pos_kp = readAsciiSiftFile(ascci_kp_file)[0]
                kpfilename = os.path.splitext(png_file)[0] + kp_file_suffix
                # print(kpfilename)
                with h5py.File(kpfilename, "r") as kpfile:
                    kps = kpfile['valid_keypoints'][()]
                    cur_pos_kp = [
                        cv2.KeyPoint(x[0], x[1], x[2], x[3], int(x[4]),
                                     int(x[5])) for x in kps
                    ]
                # print(cur_pos_kp)
                # print(type(cur_pos_kp))
                # extract a 1D array of kp scale values
                all_scales += [x.size for x in cur_pos_kp]
                # all_scales += [cur_pos_kp[2, :].flatten()]
        # print(len(all_scales))

        try:
            all_scales = np.concatenate(all_scales)
        except ValueError as err:
            print(err)
            pass

        # make histogram
        hist, bin_edges = np.histogram(all_scales, bins=100)
        hist_c = (bin_edges[1:] + bin_edges[:-1]) * 0.5

        # save to h5 file
        print("Saving hist file...")
        with h5py.File(hist_file_name, 'w') as hist_file:
            hist_file['all_scales'] = all_scales
            hist_file['histogram_bins'] = hist
            hist_file['histogram_centers'] = hist_c

    # Load from the histogram file
    print("Reading hist file...")
    with h5py.File(hist_file_name, 'r') as hist_file:
        scale_hist = np.asarray(hist_file['histogram_bins'],
                                dtype=float).flatten()
        # print(scale_hist)
        scale_hist /= np.sum(scale_hist)
        # print(scale_hist)
        scale_hist_c = np.asarray(hist_file['histogram_centers']).flatten()

    return scale_hist, scale_hist_c
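The function returns a normalized histogram over bin centers, i.e. a discrete distribution of keypoint scales. One plausible use is sampling scales from it, e.g.:

scale_hist, scale_hist_c = get_scale_hist(train_data_dir, param)
samples = np.random.choice(scale_hist_c, size=8, p=scale_hist)  # scale_hist sums to 1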
Example #18
def harris_detector(input_img, output_img, n_keypoints, block_size, ksize,
                    scales, threshold):

    # Get grayscale of the image
    gray = cv.cvtColor(input_img, cv.COLOR_BGR2GRAY)

    blured_img = cv.GaussianBlur(gray, ksize=(0, 0), sigmaX=4.5)

    # Get gradients over each coordinate
    g1 = cv.Sobel(blured_img, -1, 1, 0)
    g2 = cv.Sobel(blured_img, -1, 0, 1)

    # Compute the Gaussian pyramid of each gradient and of the image.
    piramide_g1 = [g1] + piramide_gaussiana(g1, 1, scales)
    piramide_g2 = [g2] + piramide_gaussiana(g2, 1, scales)
    piramide = [gray] + piramide_gaussiana(gray, 1, scales)

    # K is going to store all the keypoints.
    k = []

    # This is going to be the vector of images to return.
    imgs = []

    # For each of the scales.
    for scale in range(scales):

        # Get the gradient directions
        dx = piramide_g1[scale]
        dy = piramide_g2[scale]

        # Get the eigenvalues of the corresponding scale of the pyramid.
        data = cv.cornerEigenValsAndVecs(piramide[scale], block_size, ksize)

        e1 = data[:, :, 0]
        e2 = data[:, :, 1]

        # Calculate matrix of harmonic means
        h_means = harmonic_mean(e1, e2)
        h_means[np.isnan(h_means)] = 0

        # Calculate local maxima
        peaks = peak_local_max(h_means,
                               min_distance=block_size // 2,
                               num_peaks=n_keypoints,
                               threshold_abs=threshold)
        print("\tMáximos en la escala ", scale, ": ", len(peaks))

        size = (scales - scale + 1) * block_size
        # Auxiliary array with the keypoints of the current scale.
        k_aux = []

        # For each of the local peaks.
        for peak in peaks:

            # Retrieve local maxima coordinates.
            x = peak[0]
            y = peak[1]
            # Calculate the angle of the gradient in that point.
            norm = np.sqrt(dx[x][y] * dx[x][y] + dy[x][y] * dy[x][y])
            sin = dy[x][y] / norm if norm > 0 else 0
            cos = dx[x][y] / norm if norm > 0 else 0
            angle = np.degrees(np.arctan2(sin, cos)) + 180

            # Add Keypoints to current vector.
            k_aux += [cv.KeyPoint(y, x, _size=size, _angle=angle)]

            # Add keypoints to global keypoints vector with the transformed coordinates.
            k += [
                cv.KeyPoint(y * (2**scale),
                            x * (2**scale),
                            _size=size,
                            _angle=angle)
            ]

        # Copy current scale
        scale_img = np.copy(piramide[scale])

        # Draw keypoints over it
        cv.drawKeypoints(piramide[scale], k_aux, scale_img, 0, 5)

        # Add it to the return array
        imgs += [scale_img]

    # Add all keypoints over the original color image and add it to the return array.
    imgs = [cv.drawKeypoints(input_img, k, output_img, flags=5)] + imgs

    return imgs, k
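harris_detector depends on harmonic_mean and piramide_gaussiana, which are defined elsewhere in the project. For the former, a sketch consistent with the NaN handling above (the harmonic-mean corner score det(M)/trace(M), up to a factor of 2) would be:

import numpy as np

def harmonic_mean(e1, e2):
    # Harmonic-mean corner criterion on the structure-tensor eigenvalues.
    # Where e1 + e2 == 0 this yields NaN, which the caller zeroes out.
    with np.errstate(divide='ignore', invalid='ignore'):
        return (e1 * e2) / (e1 + e2)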
Example #19
def get_ransac_image_byte(img_1, locations_1, descriptors_1, img_2, locations_2, descriptors_2, save_path=None, use_opencv_match_vis=True):
  """
  Args:
      img_1: image bytes. JPEG, PNG
      img_2: image bytes. JPEG, PNG

  Return:
      RANSAC result PNG image as bytes
      score: number of matching inliers
  """

  # Convert image byte to 3 channel numpy array
  with Image.open(io.BytesIO(img_1)) as img:
    img_1 = load_image_into_numpy_array(img)
  with Image.open(io.BytesIO(img_2)) as img:
    img_2 = load_image_into_numpy_array(img)

  inliers, locations_1_to_use, locations_2_to_use = get_inliers(locations_1, descriptors_1, locations_2, descriptors_2)

  # Visualize correspondences, and save to file.
  fig, ax = plt.subplots(figsize=IMAGE_SIZE)
  inlier_idxs = np.nonzero(inliers)[0]
  score = sum(inliers) if inliers is not None else 0
#   # For different size of image, transform img_1 to fit to img_2
#   print('img_1 shape', img_1.shape)
#   print('img_1 type', type(img_1))
#   print('img_2 shape', img_2.shape)

#   ratio = float(img_2.shape[1]) / img_1.shape[1]
#   print('ratio', ratio)

#   resize_img_1 = imresize(img_1, ratio, interp='bilinear', mode=None)
#   print('resize_img_1 shape', resize_img_1.shape)

  if use_opencv_match_vis:
    inlier_matches = []
    for idx in inlier_idxs:
        inlier_matches.append(cv2.DMatch(idx, idx, 0))
        
    kp1 =[]
    for point in locations_1_to_use:
        kp = cv2.KeyPoint(point[1], point[0], _size=1)
        kp1.append(kp)

    kp2 =[]
    for point in locations_2_to_use:
        kp = cv2.KeyPoint(point[1], point[0], _size=1)
        kp2.append(kp)


    ransac_img = cv2.drawMatches(img_1, kp1, img_2, kp2, inlier_matches, None, flags=0)
    ransac_img = cv2.cvtColor(ransac_img, cv2.COLOR_BGR2RGB)    
    image_byte = cv2.imencode('.png', ransac_img)[1].tobytes()

  else:
    plot_matches(
        ax,
        img_1,
        img_2,
        locations_1_to_use,
        locations_2_to_use,
        np.column_stack((inlier_idxs, inlier_idxs)),
        matches_color='b')
    ax.axis('off')
    extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())      
    buf = io.BytesIO()
    fig.savefig(buf, bbox_inches=extent, format='png')
    plt.close('all') # close resources. 
    image_byte = buf.getvalue()

  return image_byte, score
Example #20
def auto_correspondences(im1, im2, face1, face2):
    #Good Features to Track detection + SIFT descriptor
    im1Gray = cv.cvtColor(im1, cv.COLOR_BGR2GRAY)
    im2Gray = cv.cvtColor(im2, cv.COLOR_BGR2GRAY)
    #    im1Hue = cv.cvtColor(im1, cv.COLOR_BGR2HSV)
    #    im2Hue = cv.cvtColor(im2, cv.COLOR_BGR2HSV)
    #    im1Gray = im1Hue[..., 0]
    #    im2Gray = im2Hue[..., 0]
    im1Gray = cv.GaussianBlur(im1Gray, (5, 5), 0.1)
    im2Gray = cv.GaussianBlur(im2Gray, (5, 5), 0.1)
    x1, y1, w1, h1 = face1[0, 0], face1[0, 1], face1[0, 2], face1[0, 3]
    x2, y2, w2, h2 = face2[0, 0], face2[0, 1], face2[0, 2], face2[0, 3]

    corners1 = cv.goodFeaturesToTrack(im1Gray[y1:y1 + h1, x1:x1 + w1],
                                      maxCorners=200,
                                      qualityLevel=0.05,
                                      minDistance=9)
    #    corners1 = np.int0(corners1)
    kcorners1 = []
    corners2 = cv.goodFeaturesToTrack(im2Gray[y2:y2 + h2, x2:x2 + w2],
                                      maxCorners=200,
                                      qualityLevel=0.05,
                                      minDistance=9)
    #    corners2 = np.int0(corners2)
    kcorners2 = []

    for i in corners1:
        x, y = i.ravel() + face1[0, :2]
        kcorners1.append(cv.KeyPoint(x, y, 32))

    for i in corners2:
        x, y = i.ravel() + face2[0, :2]
        #        pt = cv.Point2f(x, y)
        kcorners2.append(cv.KeyPoint(x, y, 32))

    for i in corners1:
        x, y = (i.ravel() + face1[0, :2]).astype(int)
#        cv.circle(im1,(x,y),3,255,-1)

#    cv.imshow('kp1', im1)

    for i in corners2:
        x, y = (i.ravel() + face2[0, :2]).astype(int)
#        cv.circle(im2,(x,y),3,255,-1)

#    cv.imshow('kp2', im2)

    sift = cv.xfeatures2d.SIFT_create(nfeatures=4)
    #    kcorners1, des1 = sift.detectAndCompute(im1Gray, None)
    #    kcorners2, des2 = sift.detectAndCompute(im2Gray, None)

    kcorners1, des1 = sift.compute(im1Gray, kcorners1)
    kcorners2, des2 = sift.compute(im2Gray, kcorners2)

    #    plt.imshow(im3,),plt.show()

    #    im1c=cv.drawKeypoints(im1,kcorners1,outImage = None, flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    #    im2c=cv.drawKeypoints(im2,kcorners2,outImage = None, flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    #    cv.imshow('kp1', im1c)
    #    cv.imshow('kp2', im2c)

    bf = cv.BFMatcher()
    matchesF = bf.knnMatch(des1, des2, k=1)

    #    good = []
    #    for m,n in matches:
    #        if m.distance < 0.99*n.distance:
    #            good.append([m])

    #cv2.drawMatchesKnn expects list of lists as matches.
    #    im3 = np.zeros_like(im1)
    #    im3 = cv.drawMatchesKnn(im1,kcorners1,im2,kcorners2,matchesF,flags=2, outImg = None)
    #    cv.imshow('match', im3)
    #     print(len(matchesF))
    m_forward = np.zeros((len(matchesF), 2))

    for i in range(len(matchesF)):
        m_forward[i, 0] = matchesF[i][0].queryIdx
        m_forward[i, 1] = matchesF[i][0].trainIdx
#        print kcorners2[np.int32(m_forward[i, 1])].pt

    matchesB = bf.knnMatch(des2, des1, k=1)

    #    good = []
    #    for m,n in matches:
    #        if m.distance < 0.99*n.distance:
    #            good.append([m])

    #cv2.drawMatchesKnn expects list of lists as matches.
    #    im3 = np.zeros_like(im1)
    #    im4 = cv.drawMatchesKnn(im2,kcorners2,im1,kcorners1,matchesB,flags=2, outImg = None)
    #    cv.imshow('match2', im4)
    #     print(len(matchesB))
    m_backward = np.zeros((len(matchesB), 2))
    for i in range(len(matchesB)):
        m_backward[i, 0] = matchesB[i][0].trainIdx
        #        print kcorners2[np.int32(m_backward[i, 0])].pt
        m_backward[i, 1] = matchesB[i][0].queryIdx

    return kcorners1, kcorners2, m_forward, m_backward, matchesF, matchesB
Example #21
 def detectAndCompute(self, img, mask=None):
     assert mask is None, 'mask support not implemented'
     kps, desc, score = self.extractor.extract(img)
     kps = [cv2.KeyPoint(x, y, s, 0, r) for (x, y, s), r in zip(kps, score)]
     return kps, desc
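A wrapper with this detectAndCompute signature drops straight into the usual OpenCV matching pipeline; a hypothetical usage sketch (extractor, img1, img2 assumed):

kps1, des1 = extractor.detectAndCompute(img1)
kps2, des2 = extractor.detectAndCompute(img2)
matcher = cv2.BFMatcher(cv2.NORM_L2)
matches = sorted(matcher.match(des1, des2), key=lambda m: m.distance)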
Example #22
out_fast = cv2.VideoWriter("./fast.avi", fourcc, 24.0, (3840,2160))
out_orb = cv2.VideoWriter("./orb.avi", fourcc, 24.0, (3840,2160))

while(True):
    ret, frame = video_capture.read()
    if(frame is None): break  # check for empty frames (end of video)
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    
    ##Harris
    mask_harris = cv2.cornerHarris(np.float32(frame_gray), blockSize=2, ksize=3, k=0.04) #2, 3, 0.04 // 2, 5, 0.07
    #mask_harris = cv2.dilate(mask_harris, None)    
    cutout = np.sort(mask_harris.flatten())[-500]  # sort ascending, take the 500th-largest response as the cutoff
    corners = np.where(mask_harris > cutout)
    corners = zip(corners[0], corners[1])
    kp = list()
    for i in corners: kp.append(cv2.KeyPoint(i[1], i[0], 20))
    frame_harris = cv2.drawKeypoints(frame_gray, kp, None, [0, 0, 255], 
                                     flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

            
    ##Shi-Tomasi
    #maxCorners: Maximum number of corners to return
    #qualityLevel: Parameter characterizing the minimal accepted quality of image corners.
    #minDistance: Minimum possible Euclidean distance between the returned corners.
    #blockSize: Size of an average block for computing a derivative covariation matrix over each pixel neighborhood.

    corners = cv2.goodFeaturesToTrack(frame_gray, maxCorners=500, qualityLevel=0.01, minDistance=10, blockSize=2)
    corners = np.int0(corners)
    kp = list()
    for i in corners: kp.append(cv2.KeyPoint(i.ravel()[0], i.ravel()[1], 20))
    frame_shitomasi = cv2.drawKeypoints(frame_gray, kp, None, [0, 0, 255], 
Example #23
    blobframe = cv2.drawKeypoints(gray3, keypoints, np.array([]), (0, 0, 255),
                                  cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    cv2.imshow("preview", gray2)
    cv2.imshow("preview2", frame2)
    if blobframe is not None:
        cv2.imshow("previewblob", blobframe)

    if record and face is not None and center is not None and blobframe is not None:
        (x, y, w, h) = face
        (px, py) = center
        if len(keypoints) > 4:
            keypoints = keypoints[0:4]
        else:
            keypoints = keypoints + (
                4 - len(keypoints)) * [cv2.KeyPoint(2, -1, -1)]
        print(','.join(
            [str(x), str(y), str(px), str(py)] +
            [str(x.pt[0])
             for x in keypoints] + [str(x.pt[1]) for x in keypoints]))

    nf = nf + 1
    if time() - ptime > 5:
        # print(str(nf/(time()-ptime)))
        ptime = time()
        nf = 0
    rval, frame = vc.read()
    rval2, frame2 = vc2.read()
    key = cv2.waitKey(20)
    if key != -1:
        #     print(key)
Example #24
def unpickle_cv2(arr):
    index = []
    for point in arr:
        temp = cv2.KeyPoint(x=point[0][0],y=point[0][1],_size=point[1], _angle=point[2], _response=point[3], _octave=point[4], _class_id=point[5])
        index.append(temp)
    return np.array(index)
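unpickle_cv2 implies a serializer that stored each keypoint as ((x, y), size, angle, response, octave, class_id); a matching counterpart, sketched to mirror the field order it reads:

def pickle_cv2(kps):
    # Inverse of unpickle_cv2: flatten each cv2.KeyPoint into plain Python values.
    return np.array([(kp.pt, kp.size, kp.angle, kp.response, kp.octave, kp.class_id)
                     for kp in kps], dtype=object)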
Example #25
 def to_cv2_kp(self, kp):
     return cv2.KeyPoint(kp[1], kp[0], kp[2], kp[3] / np.pi * 180)
Example #26
    def set_blur_range(img,
                       instance_scores,
                       keypoint_scores,
                       keypoint_coords,
                       min_pose_score=0.5,
                       min_part_score=0.5):
        out_img = img
        adjacent_keypoints = []
        cv_keypoints = []

        people_count = []
        list_start_blur_y = []
        list_end_blur_y = []
        list_start_blur_x = []
        list_end_blur_x = []

        for ii, score in enumerate(instance_scores):
            if score < min_pose_score:
                continue

            new_keypoints = posenet.utils.get_adjacent_keypoints(
                keypoint_scores[ii, :], keypoint_coords[ii, :, :],
                min_part_score)
            adjacent_keypoints.extend(new_keypoints)

            people_count.append(ii)

            kc2 = []
            #print(ii, "th detected person")

            for ks, kc in zip(keypoint_scores[ii, :5],
                              keypoint_coords[ii, :5, :]):
                if ks < min_part_score:
                    continue
                cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 10. * ks))
                # clamp out-of-frame coordinates; the compound cases must come
                # first, otherwise they are unreachable
                if kc[0] < 0 and kc[1] < 0:
                    kc2.append([1, 1])
                elif kc[0] > height and kc[1] > width:
                    kc2.append([height - 3, width - 3])
                elif kc[1] < 0:
                    kc2.append([kc[0], 1])
                elif kc[1] > width:
                    kc2.append([kc[0], width - 3])
                elif kc[0] < 0:
                    kc2.append([1, kc[1]])
                elif kc[0] > height:
                    kc2.append([height - 3, kc[1]])
                else:
                    kc2.append(kc)

            if len(kc2) >= 3:
                # nose
                nose_y = int(kc2[0][0])  # y axis
                nose_x = int(kc2[0][1])  # x axis
                # right eye
                eye_right_y = int(kc2[1][0])
                eye_right_x = int(kc2[1][1])
                # left eye
                eye_left_y = int(kc2[2][0])
                eye_left_x = int(kc2[2][1])

                #print("> 1.Nose(y:",nose_y,", x:",nose_x,")","2.EyeRight(y:",eye_right_y,", x:",eye_right_x,")",
                #      "3.EyeLeft(y:",eye_left_y,", x:",eye_left_x,")")

                # min, max, and mean of the y values
                value_y = [nose_y, eye_right_y, eye_left_y]
                min_value_y = min(value_y)
                max_value_y = max(value_y)
                mean_value_y = int(sum(value_y) / len(value_y))
                # min, max, and mean of the x values
                value_x = [nose_x, eye_right_x, eye_left_x]
                min_value_x = min(value_x)
                max_value_x = max(value_x)
                mean_value_x = int(sum(value_x) / len(value_x))

                # gaps used to scale up the blur region
                value_y_gap = abs(max_value_y - min_value_y)
                value_x_gap = abs(max_value_x - min_value_x)

                # bounds of the region to blur
                start_blur_y = abs(mean_value_y - int(value_y_gap * 1.7))
                end_blur_y = mean_value_y + int(value_y_gap * 2)
                start_blur_x = abs(min_value_x - value_y_gap)
                end_blur_x = max_value_x + value_y_gap
                if start_blur_x == end_blur_x:
                    end_blur_x += 3
                if start_blur_y == end_blur_y:
                    end_blur_y += 3
            # workaround for an error seen in one of the test videos
                if abs(eye_right_x - eye_left_x) > 20:
                    start_blur_y = abs(min_value_y - value_y_gap)
                    end_blur_y = nose_y + abs(eye_right_x -
                                              eye_left_x) + value_y_gap
                    start_blur_x = abs(min_value_x - value_x_gap)
                    end_blur_x = max_value_x + value_x_gap
                #print("[",start_blur_y,":",end_blur_y,", ",start_blur_x,":",end_blur_x,"]")

            elif len(kc2) >= 2:
                # nose
                nose_y = int(kc2[0][0])  # y axis
                nose_x = int(kc2[0][1])  # x axis
                # right eye
                eye_right_y = int(kc2[1][0])
                eye_right_x = int(kc2[1][1])

                #print("> 1.Nose(y:",nose_y,", x:",nose_x,")","2.EyeRight(y:",eye_right_y,", x:",eye_right_x,")")

                # min, max, and mean of the y values
                value_y = [nose_y, eye_right_y]
                min_value_y = min(value_y)
                max_value_y = max(value_y)
                mean_value_y = int(sum(value_y) / len(value_y))
                # min, max, and mean of the x values
                value_x = [nose_x, eye_right_x]
                min_value_x = min(value_x)
                max_value_x = max(value_x)
                mean_value_x = int(sum(value_x) / len(value_x))
                # gaps used to scale up the blur region
                value_y_gap = abs(max_value_y -
                                  int(sum(value_y) / len(value_y)))
                value_x_gap = abs(max_value_x -
                                  int(sum(value_x) / len(value_x)))

                # bounds of the region to blur
                start_blur_y = abs(mean_value_y - int(value_y_gap * 1.7))
                end_blur_y = mean_value_y + int(value_y_gap * 2)
                start_blur_x = abs(mean_value_x - value_y_gap)
                end_blur_x = mean_value_x + value_y_gap
                if start_blur_x == end_blur_x:
                    end_blur_x += 3
                if start_blur_y == end_blur_y:
                    end_blur_y += 3
                #print("[",start_blur_y,":",end_blur_y,", ",start_blur_x,":",end_blur_x,"]")

            else:
                # bounds of the region to blur
                start_blur_y = 0
                end_blur_y = 0
                start_blur_x = 0
                end_blur_x = 0

            list_start_blur_y.append(start_blur_y)
            list_end_blur_y.append(end_blur_y)
            list_start_blur_x.append(start_blur_x)
            list_end_blur_x.append(end_blur_x)

        return list_start_blur_y, list_end_blur_y, list_start_blur_x, list_end_blur_x, people_count
Example #27
import skimage.data as skid
import cv2
import pylab as plt
import scipy.misc

img = scipy.misc.face()
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)  # scipy.misc.face() returns RGB, not BGR

plt.figure(figsize=(20, 10))
plt.imshow(img)
plt.show()

sift = cv2.xfeatures2d.SIFT_create()

step_size = 5
kp = [
    cv2.KeyPoint(x, y, step_size) for y in range(0, gray.shape[0], step_size)
    for x in range(0, gray.shape[1], step_size)
]

img = cv2.drawKeypoints(gray, kp, img)

plt.figure(figsize=(20, 10))
plt.imshow(img)
plt.show()

dense_feat = sift.compute(gray, kp)
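Note that sift.compute returns a (keypoints, descriptors) pair, so the dense descriptor matrix is the second element:

kp_out, desc = sift.compute(gray, kp)
print(desc.shape)  # (len(kp), 128) for SIFT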
Example #28
        do_motion_fade = False
        if do_motion_fade:
            motion_mask = motion2(new_frame, base)
        else:
            motion_mask = None
        accum = overlay(new_frame, base, motion_mask)

    final = accum
    if args.draw_keypoints:
        new_filtered = []
        if affine_new is not None:
            affine_T = affine_new.T
            for kp in filtered:
                a = np.array([kp.pt[0], kp.pt[1], 1.0])
                pt = np.dot(a, affine_T)
                new_kp = cv2.KeyPoint(pt[0], pt[1], kp.size)
                new_filtered.append(new_kp)
        final = cv2.drawKeypoints(accum,
                                  new_filtered,
                                  color=(0, 255, 0),
                                  flags=0)

    cv2.imshow('bgr', res1)
    cv2.imshow('smooth', new_frame)
    cv2.imshow('final', final)
    #output.write(res1)
    if args.write_smooth:
        output.write(final)
    if 0xFF & cv2.waitKey(5) == 27:
        break
Example #29
                    continue  # goto next scene

                if args.save2txt:
                    out_dir = os.path.join(res_dir, scene_name)
                    if not os.path.exists(out_dir):
                        os.makedirs(out_dir)
                    pts_fn = os.path.join(out_dir, '%d_kp.txt' % 1)
                    np.savetxt(pts_fn, pts0)

                # convert to cv2 kp for prototype homogeneity
                kp0 = []
                for pt in pts0.T:
                    kp = cv2.KeyPoint(x=pt[0],
                                      y=pt[1],
                                      _size=4,
                                      _angle=0,
                                      _response=0,
                                      _octave=0,
                                      _class_id=0)
                    kp0.append(kp)

                # draw kp on img
                img0 = cv2.cvtColor(img0, cv2.COLOR_BGR2GRAY)
                kp_on_img0 = np.tile(np.expand_dims(img0, 2), (1, 1, 3))
                for i, kp in enumerate(kp0):
                    pt = (int(round(kp.pt[0])), int(round(kp.pt[1])))
                    cv2.circle(kp_on_img0, pt, 1, (0, 255, 0), -1, lineType=16)

                # description
                des_coarse = sess.run(feats_op[args.feat_name],
                                      feed_dict={img_op: patch})[0, :, :, :]
Example #30
    # Filter the matched pairs
    min_dist = 1000
    max_dist = 0
    disdif_avg = 0

    # Average gap between second-best and best match distances
    for m, n in matches:
        disdif_avg += n.distance - m.distance
    disdif_avg = disdif_avg / len(matches)
    # print('disdif_avg:', disdif_avg)
    for m, n in matches:
        # Adaptive threshold based on the average distance gap
        if n.distance > m.distance + 1*disdif_avg:
        # if m.distance < 0.9 * n.distance: 
            goodMatch.append(m)
            p2 = cv2.KeyPoint(kps_right[m.trainIdx][0],  kps_right[m.trainIdx][1],  1)
            p1 = cv2.KeyPoint(kps_left[m.queryIdx][0], kps_left[m.queryIdx][1], 1)
            locations_1_to_use.append([p1.pt[0], p1.pt[1]])
            locations_2_to_use.append([p2.pt[0], p2.pt[1]])
            dis.append([n.distance, m.distance])
    #goodMatch = sorted(goodMatch, key=lambda x: x.distance)
    # print('match num is %d' % len(goodMatch))
    locations_1_to_use = np.array(locations_1_to_use)
    locations_2_to_use = np.array(locations_2_to_use)
    dis = np.array(dis)
    
    # Perform geometric verification using RANSAC.
    _, inliers = measure.ransac((locations_1_to_use, locations_2_to_use),
                              transform.AffineTransform,
                              min_samples=3,
                              residual_threshold=_RESIDUAL_THRESHOLD,