Example #1
def processInput():
    print ""
    if inputArgs.left == "" or inputArgs.right == "":
        print "Missing images!"
        quit()

    # here we go ...

    # load image pair
    img_l = cv2.imread(inputArgs.left)
    img_r = cv2.imread(inputArgs.right)

    if img_l is None or img_r is None:
        print "Missing images!"
        quit()

    # we like them gray
    gray_l = cv2.cvtColor(img_l, cv2.COLOR_BGR2GRAY)
    gray_r = cv2.cvtColor(img_r, cv2.COLOR_BGR2GRAY)

    # which detector are we using
    if inputArgs.feature == 'sift':
        detector = cv2.SIFT()
        norm = cv2.NORM_L2
    elif inputArgs.feature == 'surf':
        detector = cv2.SURF(800)
        norm = cv2.NORM_L2
    elif inputArgs.feature == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING
    elif inputArgs.feature == 'brisk':
        detector = cv2.BRISK()
        norm = cv2.NORM_HAMMING
    else:
        print "Wrong feature detector!"
        quit()

    # how are we matching detected features
    if inputArgs.match == 'bf':
        matcher = cv2.BFMatcher(norm)

    elif inputArgs.match == 'flann':
        # borrowed from: https://github.com/Itseez
        FLANN_INDEX_KDTREE = 1  # bug: flann enums are missing
        FLANN_INDEX_LSH = 6

        flann_params = []
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        else:
            flann_params = dict(
                algorithm=FLANN_INDEX_LSH,
                table_number=6,  # 12
                key_size=12,  # 20
                multi_probe_level=1)  #2
        matcher = cv2.FlannBasedMatcher(
            flann_params, {})  # bug : need to pass empty dict (#1329)

    print "Using: " + inputArgs.feature + " with " + inputArgs.match
    print ""

    print "detecting ..."
    # find the keypoints and descriptors
    kp_l, des_l = detector.detectAndCompute(gray_l, None)
    kp_r, des_r = detector.detectAndCompute(gray_r, None)

    print "Left image features: " + str(len(kp_l))
    print "Right image features: " + str(len(kp_l))
    print ""
    # visualization
    if inputArgs.debug == 1:
        # left
        img_l_tmp = img_l.copy()
        #for kp in kp_l:
        #	x = int(kp.pt[0])
        #	y = int(kp.pt[1])
        #	cv2.circle(img_l_tmp, (x, y), 2, (0, 0, 255))
        img_l_tmp = cv2.drawKeypoints(img_l_tmp, kp_l, img_l_tmp, (0, 0, 255),
                                      cv2.DRAW_MATCHES_FLAGS_DEFAULT)
        head, tail = os.path.split(inputArgs.left)
        cv2.imwrite(head + "/" + "feat_" + tail, img_l_tmp)
        # right
        img_r_tmp = img_r.copy()
        #for kp in kp_r:
        #	x = int(kp.pt[0])
        #	y = int(kp.pt[1])
        #	cv2.circle(img_r_tmp, (x, y), 2, (0, 0, 255))
        img_r_tmp = cv2.drawKeypoints(img_r_tmp, kp_r, img_r_tmp, (0, 0, 255),
                                      cv2.DRAW_MATCHES_FLAGS_DEFAULT)
        head, tail = os.path.split(inputArgs.right)
        cv2.imwrite(head + "/" + "feat_" + tail, img_r_tmp)

    print "matching ..."

    # match
    raw_matches = matcher.knnMatch(des_l, trainDescriptors=des_r, k=2)
    print "Raw matches: " + str(len(raw_matches))

    # filter matches: per Lowe's ratio test
    filtered_matches = []
    mkp_l = []
    mkp_r = []

    for m in raw_matches:
        if len(m) == 2 and m[0].distance < m[1].distance * inputArgs.proportion:
            filtered_matches.append(m)
            mkp_l.append(kp_l[m[0].queryIdx])
            mkp_r.append(kp_r[m[0].trainIdx])
    print "Filtered matches: " + str(len(filtered_matches))

    # visualization
    if inputArgs.debug == 1:
        # draw points
        img_l_tmp = cv2.drawKeypoints(
            img_l_tmp, mkp_l, img_l_tmp, (255, 0, 0),
            cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        head, tail = os.path.split(inputArgs.left)
        #cv2.imwrite(head+"/"+"feat_"+tail, img_l_tmp)
        img_r_tmp = cv2.drawKeypoints(
            img_r_tmp, mkp_r, img_r_tmp, (255, 0, 0),
            cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        head, tail = os.path.split(inputArgs.right)
        #cv2.imwrite(head+"/"+"feat_"+tail, img_r_tmp)

        # merge image side by side
        h_l, w_l = img_l_tmp.shape[:2]
        h_r, w_r = img_r_tmp.shape[:2]
        img_tmp = np.zeros((max(h_l, h_r), w_l + w_r, 3), np.uint8)
        img_tmp[:h_l, :w_l] = img_l_tmp
        img_tmp[:h_r, w_l:w_l + w_r] = img_r_tmp

        # draw lines
        for m in filtered_matches:
            cv2.line(img_tmp, (int(round(kp_l[m[0].queryIdx].pt[0])),
                               int(round(kp_l[m[0].queryIdx].pt[1]))),
                     (int(w_l + round(kp_r[m[0].trainIdx].pt[0])),
                      int(round(kp_r[m[0].trainIdx].pt[1]))), (255, 0, 0), 1)

        cv2.imwrite(inputArgs.name + "_features.jpg", img_tmp)

    # filter matches: per direction (since it's a stereo pair, most of the points should have the same angle between them)
    if inputArgs.stddev != 0.0:
        ang_stddev = 360.0
        stddev = 180.0
        while abs(stddev) > inputArgs.stddev:
            ang_stddev = stddev
            raw_matches = list(filtered_matches)  # keep a copy of the current match set before re-filtering

            filtered_matches = []
            mkp_l = []
            mkp_r = []

            ang = []
            for m in raw_matches:
                xDiff = kp_r[m[0].trainIdx].pt[0] - kp_l[m[0].queryIdx].pt[0]  # p2.x - p1.x
                yDiff = kp_r[m[0].trainIdx].pt[1] - kp_l[m[0].queryIdx].pt[1]  # p2.y - p1.y
                #print math.degrees(math.atan2(yDiff,xDiff))
                ang.append(math.degrees(math.atan2(yDiff, xDiff)))

            mean = np.mean(ang)
            differences = [(value - mean)**2 for value in ang]
            stddev = np.mean(differences)**0.5
            #print mean
            #print stddev

            ang = []
            for m in raw_matches:
                xDiff = kp_r[m[0].trainIdx].pt[0] - kp_l[m[0].queryIdx].pt[0]  # p2.x - p1.x
                yDiff = kp_r[m[0].trainIdx].pt[1] - kp_l[m[0].queryIdx].pt[1]  # p2.y - p1.y
                ang_tmp = math.degrees(math.atan2(yDiff, xDiff))
                if (mean + stddev) > (mean - stddev):
                    if (mean + stddev) >= ang_tmp and (mean -
                                                       stddev) <= ang_tmp:
                        filtered_matches.append(m)
                        mkp_l.append(kp_l[m[0].queryIdx])
                        mkp_r.append(kp_r[m[0].trainIdx])
                        ang.append(math.degrees(math.atan2(yDiff, xDiff)))
                else:
                    if (mean + stddev) <= ang_tmp and (mean -
                                                       stddev) >= ang_tmp:
                        filtered_matches.append(m)
                        mkp_l.append(kp_l[m[0].queryIdx])
                        mkp_r.append(kp_r[m[0].trainIdx])
                        ang.append(math.degrees(math.atan2(yDiff, xDiff)))

            ##print np.median(ang)
            mean = np.mean(ang)
            differences = [(value - mean)**2 for value in ang]
            stddev = np.mean(differences)**0.5
            #print mean
            #print stddev
            if (abs(ang_stddev) - abs(stddev)) < 0.001:
                break

        print "Filtered matches cheat: " + str(len(filtered_matches))

        mkp_pairs = zip(mkp_l, mkp_r)
        file = open(inputArgs.name + "_kp.txt", "w")
        for p in mkp_pairs:
            # left x , left y ; right x , right y
            file.write(
                str(p[0].pt[0]) + "," + str(p[0].pt[1]) + ";" +
                str(p[1].pt[0]) + "," + str(p[1].pt[1]) + "\n")
        file.close()

        # visualization
        if inputArgs.debug == 1:
            # draw lines
            for m in filtered_matches:
                cv2.line(img_tmp, (int(round(kp_l[m[0].queryIdx].pt[0])),
                                   int(round(kp_l[m[0].queryIdx].pt[1]))),
                         (int(w_l + round(kp_r[m[0].trainIdx].pt[0])),
                          int(round(kp_r[m[0].trainIdx].pt[1]))), (0, 255, 0),
                         1)

            cv2.imwrite(inputArgs.name + "_features.jpg", img_tmp)
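
Example #1 reads its settings from an inputArgs object that is not shown in this listing; a minimal argparse sketch that would provide the attributes used above (the attribute names come from the code, the defaults are assumptions):

# Hypothetical argument parser for Example #1 (not part of the original listing);
# attribute names match what processInput() reads, defaults are guesses.
import argparse

parser = argparse.ArgumentParser(description="match features between a stereo image pair")
parser.add_argument("--left", default="", help="path to the left image")
parser.add_argument("--right", default="", help="path to the right image")
parser.add_argument("--feature", default="sift", help="sift, surf, orb or brisk")
parser.add_argument("--match", default="bf", help="bf or flann")
parser.add_argument("--proportion", type=float, default=0.75, help="Lowe ratio test threshold")
parser.add_argument("--stddev", type=float, default=0.0, help="direction filter cutoff, 0 disables it")
parser.add_argument("--debug", type=int, default=0, help="write visualization images when set to 1")
parser.add_argument("--name", default="pair", help="prefix for the output files")
inputArgs = parser.parse_args()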
Example #2

class SaveClass:
    def __init__(self):
        self.votes = None
        self.keypoints = None
        self.descriptors = None
        self.bodypart = None


unpacker_header = struct.Struct('= 2s I')
unpacker_list_header = struct.Struct('= 2s I I')
unpacker_image_header = struct.Struct('= 2s I I I I')
packer_ack_header = struct.Struct('= 2s I')

surf = cv2.SURF(500, nOctaves=3, nOctaveLayers=3)

test_bodypart = None
bodypart_knn_pos = None
bodypart_knn_neg = None
bodypart_trained_data_pos = None
bodypart_vote = []


def main(options, args):
    global test_bodypart
    global bodypart_knn_pos, bodypart_knn_neg, bodypart_trained_data_pos, bodypart_vote

    bodypart_trained_data_pos = SaveClass()
    bodypart_trained_data_pos = pickle.load(open(options.train_data_p, 'rb'))
    bodypart_trained_data_neg = SaveClass()
import json
from pprint import pprint
import cv2
import os
import re
import numpy as np
import pickle
import random

import multiprocessing as mp
from multiprocessing import Pool
from multiprocessing import Manager
from threading import Lock

# SURF Detector Object
surf = cv2.SURF(400, nOctaves=4, nOctaveLayers=4)

class SaveClass:
    def __init__(self, votes, keypoints, descriptors, bodypart):
        self.votes = votes
        self.keypoints = keypoints
        self.descriptors = descriptors
        self.bodypart = bodypart

lock = Lock()
manager = None
bodypart_desc_train_pos_all = None
bodypart_desc_train_neg_all = None
bodypart_vote_train_pos_all = None
bodypart_vote_train_neg_all = None
Example #4
draw_params = dict(matchColor = (0,255,0),
                   singlePointColor = (255,0,0),
                   matchesMask = matchesMask,
                   flags = 0)

img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,matches,None,**draw_params)
plt.figure(figsize=(10,10))
plt.imshow(img3,),plt.show()

# <markdowncell>

# However, SIFT keypoints carry orientation information and SIFT is comparatively slow, so we'll use SURF instead, keeping only upright keypoints and their size, since stars should look the same from any angle (obviously).

# <codecell>

surf = cv2.SURF(10)
surf.upright = True #Direction invariant
kp1, des1 = surf.detectAndCompute(img1,None)
kp2, des2 = surf.detectAndCompute(img2,None)
#for kps in kp1:
#    print "x: " + str(kps.pt[0]) + " y: " + str(kps.pt[1]) + " Size: " + str(kps.size) + " Octave: " \
#    + str(kps.octave) + " Response: " + str(kps.response)
#for desc in des1:
#    print desc
#    break
print len(kp1)

# <codecell>

# FLANN parameters
FLANN_INDEX_KDTREE = 0
Example #5
              0.0]).reshape(1, 8)  # distortion coefficients
K = np.array(
    [1918.270000, 2.489820, 17.915, 0.0, 1922.580000, -63.736, 0.0, 0.0,
     1.0]).reshape(3, 3)
K2 = np.array([
    1909.910000, 0.571503, -33.069000, 0.0, 1915.890000, -10.306, 0.0, 0.0, 1.0
]).reshape(3, 3)
K_inv = np.linalg.inv(K)
K2_inv = np.linalg.inv(K2)

# undistort the images first
first_rect = cv2.undistort(first_img, K, d)
second_rect = cv2.undistort(second_img, K2, d)

# extract key points and descriptors from both images
detector = cv2.SURF(400)
first_key_points, first_descriptors = detector.detectAndCompute(
    first_rect, None)
second_key_points, second_descriptors = detector.detectAndCompute(
    second_rect, None)

print "X: %d Y: %d" % (len(first_key_points), len(second_key_points))

# match descriptors
matcher = cv2.BFMatcher(cv2.NORM_L1, True)
matches = matcher.match(first_descriptors, second_descriptors)
print "Matches: %d" % len(matches)
# generate lists of point correspondences
first_match_points = np.zeros((len(matches), 2), dtype=np.float32)
second_match_points = np.zeros_like(first_match_points)
for i in range(len(matches)):
print 'Loaded ' + str(len(
    test_images_filenames)) + ' testing images filenames with classes ' + str(
        set(test_labels))

#Feature extractors:
myextractor = []
if extractor == 'sift':
    #myextractor.append(cv2.SIFT(nfeatures=100))
    myextractor.append(cv2.xfeatures2d.SIFT_create(nfeatures=100))
    D, L = SIFT_features(myextractor, train_images_filenames, train_labels)
elif extractor == 'n_sift':
    for i in range(num_sift_descriptors):
        myextractor.append(cv2.SIFT(nfeatures=100))
    D, L = n_SIFT_features(myextractor, train_images_filenames, train_labels)
elif extractor == 'surf':
    myextractor.append(cv2.SURF(100))
    D, L = SURF_features(myextractor, train_images_filenames, train_labels)
else:
    sys.exit('[ERROR]: Not a valid extractor')

if reduce_dim:
    #Dimensionality reduction using PCA due to high computation:
    pca = PCA(n_components=25)
    pca.fit(D)
    D = pca.transform(D)

if not os.path.exists('./models/' + experiment_filename):
    if classifier == 'knn':
        myclassifier = train_knn(D, L, experiment_filename)
    elif classifier == 'rf':
        myclassifier = train_random_forest(D, L, experiment_filename)
Example #7
                        choices=claOptions,
                        default=claOptions[0],
                        help="Which classifier to use. (default KNN)")
    extractOptions = ["SIFT", "SURF"]
    parser.add_argument("--extractor",
                        "-e",
                        choices=extractOptions,
                        default=extractOptions[0],
                        help="Which feature extractor to use. (default SIFT)")

    args = parser.parse_args()

    if args.extractor == "SIFT":
        extractor = cv2.SIFT()
    elif args.extractor == "SURF":
        extractor = cv2.SURF(400)
    data = DataSet(args.datapath, args.trainingPercentage, args.K,
                   args.randomTrainingSet, extractor)

    stateFile = (args.stateFile if args.stateFile != "" else
                 "classifierState") if args.load else ""
    if args.classifier == "KNN":
        classifier = KNNClassifier(stateFile)
    elif args.classifier == "SVM":
        classifier = SVMClassifier(stateFile)

    total = 0.0
    mark = time()
    data.load()
    stepTime = time() - mark
    total += stepTime
Example #8
def Matching(img1,img2,bbx_1,bbx_2):
    
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    
    surf = cv2.SURF()

    # find the keypoints and descriptors with SURF
    kp1, des1 = surf.detectAndCompute(img1,None)
    kp2, des2 = surf.detectAndCompute(img2,None)

    # FLANN parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params,search_params)
    matches = flann.knnMatch(des1,des2,k=2)

    good = []
    pts1 = []
    pts2 = []

    # ratio test as per Lowe's paper
    for i,(m,n) in enumerate(matches):
        if m.distance < 0.7*n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)

    pts1 = np.float64(pts1)
    pts2 = np.float64(pts2)
    
    pts1,pts2=removesamenumber(pts1,pts2)
    #print len(pts1)
    #print pts1
    #print pts2

    F, mask = cv2.findFundamentalMat(pts1,pts2,cv2.FM_RANSAC)

    tri = Delaunay(pts1)

    triangle=tri.simplices

    lpoint=pts1
    rpoint=pts2

    # load bounding boxes
    centroid_1=bbx2centroid(bbx_1)
    centroid_2=bbx2centroid(bbx_2)    
    #print centroid_1
    #print centroid_2
    
    # Find triangles with centroids
    triangle_1=[]
    triangle_2=[]
    
    for i in range(len(centroid_1)):
        p=centroid_1[i]
        for j in range(len(triangle)):
            lvertexa=[lpoint[triangle[j][0]]]
            lvertexb=[lpoint[triangle[j][1]]]
            lvertexc=[lpoint[triangle[j][2]]]
            rvertexa=[rpoint[triangle[j][0]]]
            rvertexb=[rpoint[triangle[j][1]]]
            rvertexc=[rpoint[triangle[j][2]]]
            if PointInTriangle(p, lvertexa, lvertexb, lvertexc):
                triangle_1.append([lvertexa, lvertexb, lvertexc])
                triangle_2.append([rvertexa, rvertexb, rvertexc])
        if not triangle_1:
            triangle_1.append([lvertexa, lvertexb, lvertexc])
            triangle_2.append([rvertexa, rvertexb, rvertexc])
    # Calculate triangle coordinates
    
    ltc=[]
    rtc=[]
    for jj in range(len(triangle_1)):
        ltrico=[]
        rtrico=[]
    
        for lii in range(len(centroid_1)):
            ltricoordinate=TriangleCoordinates(centroid_1[lii],(np.float32(triangle_1[jj][0][0][0]),np.float32(triangle_1[jj][0][0][1])),\
                                               (np.float32(triangle_1[jj][1][0][0]),np.float32(triangle_1[jj][1][0][1])),\
                                               (np.float32(triangle_1[jj][2][0][0]),np.float32(triangle_1[jj][2][0][1])))
            ltrico.append(ltricoordinate)

        for rii in range(len(centroid_2)):   
            rtricoordinate=TriangleCoordinates(centroid_2[rii],(np.float32(triangle_2[jj][0][0][0]),np.float32(triangle_2[jj][0][0][1])),\
                                               (np.float32(triangle_2[jj][1][0][0]),np.float32(triangle_2[jj][1][0][1])),\
                                               (np.float32(triangle_2[jj][2][0][0]),np.float32(triangle_2[jj][2][0][1])))
            rtrico.append(rtricoordinate)
        ltc.append(ltrico)
        rtc.append(rtrico)
    #print ltc
    #print rtc
        
    # Generate cost matrix
    dis2=[]
    for dii in range(len(triangle_1)):
        dis1=[]
        for di in range(len(ltc[0])):
            dis=[]
            for d in range(len(rtc[0])):
                dis.append(distance(ltc[dii][di],rtc[dii][d]))
            dis1.append(dis)
        dis2.append(dis1)
    #print dis2

    #Combinatorial optimization
    m=Munkres()
    cost=np.zeros([len(ltrico),len(rtrico)])
    for mx in range(len(dis2)):                
        matrix=dis2[mx]
        indexes = m.compute(matrix)
        for row, column in indexes:
            cost[row][column]=cost[row][column]+1


    cost_matrix = []
    for row in cost:
        cost_row = []
        for col in row:
            cost_row += [1000 - col]
        cost_matrix += [cost_row]
    index=m.compute(cost_matrix)
    
    for row, column in index:
        print '(%d, %d)' % (row, column)

    return index
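
Matching() relies on several helpers that are not included in this listing (removesamenumber, bbx2centroid, PointInTriangle, TriangleCoordinates, distance); as one illustration, here is a minimal barycentric sketch of what PointInTriangle might look like:

# Hypothetical helper (not part of the original listing): barycentric test for
# whether point p lies inside the triangle with vertices a, b, c.
import numpy as np

def PointInTriangle(p, a, b, c):
    p, a, b, c = [np.asarray(v, dtype=np.float64).ravel() for v in (p, a, b, c)]
    det = (b[1] - c[1]) * (a[0] - c[0]) + (c[0] - b[0]) * (a[1] - c[1])
    if det == 0:
        return False  # degenerate triangle
    l1 = ((b[1] - c[1]) * (p[0] - c[0]) + (c[0] - b[0]) * (p[1] - c[1])) / det
    l2 = ((c[1] - a[1]) * (p[0] - c[0]) + (a[0] - c[0]) * (p[1] - c[1])) / det
    l3 = 1.0 - l1 - l2
    return l1 >= 0 and l2 >= 0 and l3 >= 0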
    def __init__(self, *args, **kw):
        super(SURFFinder, self).__init__(*args, **kw)
        self._surf = cv2.SURF(1000, _extended=True)
Example #10
def get_feature_detector_descriptor_extractor(
        feature_detector_name=str(),
        descriptor_extractor_name=None,
        feature_detector_params=None,
        descriptor_extractor_params=None):
    """
    :param feature_detector_name:
    :param descriptor_extractor_name:
    :param feature_detector_params: dict(nfeatures=1000) for ORB
    :param descriptor_extractor_params:
    :return:
    """
    assert len(feature_detector_name) != 0
    if feature_detector_params is None:
        feature_detector_params = dict()
    if descriptor_extractor_params is None:
        descriptor_extractor_params = dict()

    feature_detector_name = feature_detector_name.upper()

    normType = cv2.NORM_L2

    if feature_detector_name == "ORB" or feature_detector_name == "BRIEF" or feature_detector_name == "BRISK":
        normType = cv2.NORM_HAMMING

    feature_detector = descriptor_extractor = None
    if feature_detector_name == "ORB":
        assert descriptor_extractor_name is None and len(
            descriptor_extractor_params) == 0
        if imutils.is_cv2():
            feature_detector = descriptor_extractor = cv2.ORB(
                **feature_detector_params)
        else:
            feature_detector = descriptor_extractor = cv2.ORB_create(
                **feature_detector_params)

    elif feature_detector_name == "BRIEF":
        assert descriptor_extractor_name is None and len(
            descriptor_extractor_params) == 0
        if imutils.is_cv2():
            feature_detector = cv2.StarDetector(**feature_detector_params)
            #descriptor_extractor = cv2.BriefDescriptorExtractor(**descriptor_extractor_params) # seems not working
            descriptor_extractor = cv2.DescriptorExtractor_create("BRIEF")
        else:
            feature_detector = cv2.xfeatures2d.StarDetector_create(
                **feature_detector_params)
            descriptor_extractor = cv2.xfeatures2d.BriefDescriptorExtractor_create(
                **descriptor_extractor_params)

    elif feature_detector_name == "BRISK":
        assert descriptor_extractor_name is None and len(
            descriptor_extractor_params) == 0
        if imutils.is_cv2():
            feature_detector = descriptor_extractor = cv2.BRISK(
                **feature_detector_params)
        else:
            feature_detector = descriptor_extractor = cv2.BRISK_create(
                **feature_detector_params)

    elif feature_detector_name == "SURF":
        assert descriptor_extractor_name is None and len(
            descriptor_extractor_params) == 0
        if imutils.is_cv2():
            feature_detector = descriptor_extractor = cv2.SURF(
                **feature_detector_params)
        else:
            feature_detector = descriptor_extractor = cv2.xfeatures2d.SURF_create(
                **feature_detector_params)

    elif feature_detector_name == "SIFT":
        assert descriptor_extractor_name is None and len(
            descriptor_extractor_params) == 0
        if imutils.is_cv2():
            feature_detector = descriptor_extractor = cv2.SIFT(
                **feature_detector_params)
        else:
            feature_detector = descriptor_extractor = cv2.xfeatures2d.SIFT_create(
                **feature_detector_params)

    else:
        print(
            "Seems we have not predefined the target feature_detector and descriptor_extractor"
        )

    return feature_detector, descriptor_extractor, normType
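
A minimal usage sketch of the helper above (the image path is a placeholder and the parameter choices are assumptions, not part of the original listing):

# Sketch only: detect with the returned feature_detector, describe with the
# descriptor_extractor, and build a matcher from the suggested norm.
import cv2

gray = cv2.imread("image.png", cv2.IMREAD_GRAYSCALE)
feature_detector, descriptor_extractor, normType = \
    get_feature_detector_descriptor_extractor("ORB", feature_detector_params=dict(nfeatures=1000))
keypoints = feature_detector.detect(gray, None)
keypoints, descriptors = descriptor_extractor.compute(gray, keypoints)
matcher = cv2.BFMatcher(normType)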
Example #11
def params_from_image(img, surf=None):
    if surf is None:
        surf = cv2.SURF(1000)
    #kp, desc = surf.detect(img, None, False)
    kp, desc = surf.detectAndCompute(img, None)
    desc.shape = (-1, surf.descriptorSize())
    return {'img':img, 'kp':kp, 'desc':desc}
Example #12
#detects and computes descriptors in an image using the SURF algorithm
import cv2
import numpy as np
from matplotlib import pyplot as plt



img_path = r'C:\Users\EnviSAGE ResLab\Desktop\Accuracy Tests Journ\Rising.Warped.jpg'  #Warped Slave
img_c = cv2.imread(img_path)
img = cv2.cvtColor(img_c, cv2.COLOR_BGR2GRAY)

surf = cv2.SURF(500)
kp, des = surf.detectAndCompute (img, None)
img2 = cv2.drawKeypoints (img, kp, None, (255,0,0),4)
'''
CPs = []
coords = np.float32([ kp[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
'''
print len(kp)
cv2.imshow("Surf", img2)
cv2.waitKey(1000)
                           
Example #13
_surf_extended = False
_surf_threshold = 30000
_num_surf_features = 10000

# Codebook & Features
_codebook_sizes = [100, 300, 500, 750]
_num_trials = 10
_perc_docs_for_codebook = 0.05
_num_surf_features_codebook = 30000
_max_k_medoids_iters = 30
_H_partitions = 3
_V_partitions = 4

# not parameters
_num_histograms = ((2**(_H_partitions) - 1) + (2**(_V_partitions) - 1) - 1)
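# e.g. with _H_partitions = 3 and _V_partitions = 4: (2**3 - 1) + (2**4 - 1) - 1 = 7 + 15 - 1 = 21 histograms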
_surf_instance = cv2.SURF(_surf_threshold)
_surf_instance.upright = _surf_upright
_surf_instance.extended = _surf_extended
_surf_features = dict()
_print_interval = 20


def calc_surf_features(im_file):
    if im_file not in _surf_features:
        im = cv2.imread(im_file, 0)
        height = im.shape[0]
        width = im.shape[1]
        # surf_features[0] is the array of keypoints
        # surf_features[1] is the array of descriptors
        _surf_instance.hessianThreshold = _surf_threshold
        kps, deses = _surf_instance.detectAndCompute(im, None)
Example #14
def stitchImages(base_img_rgb, images_array, round):
    if ( len(images_array) < 1 ):
        print "Image array empty, ending stitchImages()"
        return base_img_rgb

    base_img = cv2.GaussianBlur(cv2.cvtColor(base_img_rgb, cv2.COLOR_BGR2GRAY), (5, 5), 0)

    # Use the SURF feature detector
    detector = cv2.SURF()

    # Find key points in base image for motion estimation
    base_features, base_descs = detector.detectAndCompute(base_img, None)

    # Parameters for nearest-neighbor matching
    FLANN_INDEX_KDTREE = 1  # bug: flann enums are missing
    flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    matcher = cv2.FlannBasedMatcher(flann_params, {})

    print "Iterating through next images..."

    closestImage = None

    # TODO: Thread this loop since each iteration is independent

    # Find the best next image from the remaining images
    for index, next_img_rgb in enumerate(images_array):
        next_img = cv2.GaussianBlur(cv2.cvtColor(next_img_rgb, cv2.COLOR_BGR2GRAY), (5, 5), 0)

        print "\t Finding points..."

        next_features, next_descs = detector.detectAndCompute(next_img, None)

        matches = matcher.knnMatch(next_descs, trainDescriptors=base_descs, k=2)
        print "\t Match Count: ", len(matches)

        matches_subset = filter_matches(matches)
        print "\t Filtered Match Count: ", len(matches_subset)

        distance = imageDistance(matches_subset)
        print "\t Distance from Key Image: ", distance

        averagePointDistance = distance/float(len(matches_subset))
        print "\t Average Distance: ", averagePointDistance

        kp1 = []
        kp2 = []

        for match in matches_subset:
            kp1.append(base_features[match.trainIdx])
            kp2.append(next_features[match.queryIdx])

        p1 = np.array([k.pt for k in kp1])
        p2 = np.array([k.pt for k in kp2])

        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
        print '%d / %d  inliers/matched' % (np.sum(status), len(status))

        inlierRatio = float(np.sum(status)) / float(len(status))

        # if ( closestImage == None or averagePointDistance < closestImage['dist'] ):
        if closestImage is None or inlierRatio > closestImage['inliers']:
            closestImage            = {}
            closestImage['h']       = H
            closestImage['inliers'] = inlierRatio
            closestImage['dist']    = averagePointDistance
            closestImage['index']   = index
            closestImage['rgb']     = next_img_rgb
            closestImage['img']     = next_img
            closestImage['feat']    = next_features
            closestImage['desc']    = next_descs
            closestImage['match']   = matches_subset

    print "Closest Image Ratio: ", closestImage['inliers']

    new_images_array = images_array
    del new_images_array[closestImage['index']]  # Shortening the images array to not have the last used image



    H = closestImage['h']
    H = H / H[2, 2]
    H_inv = np.linalg.inv(H)

    if closestImage['inliers'] > 0.1:
        (min_x, min_y, max_x, max_y) = findDimensions(closestImage['img'], H_inv)

        # Adjust max_x and max_y by base img size
        max_x = max(max_x, base_img.shape[1])
        max_y = max(max_y, base_img.shape[0])

        move_h = np.matrix(np.identity(3), np.float32)

        if ( min_x < 0 ):
            move_h[0,2] += -min_x
            max_x += -min_x

        if ( min_y < 0 ):
            move_h[1,2] += -min_y
            max_y += -min_y

        print "Homography: \n", H
        print "Inverse Homography: \n", H_inv
        print "Min Points: ", (min_x, min_y)

        mod_inv_h = move_h * H_inv

        img_w = int(math.ceil(max_x))
        img_h = int(math.ceil(max_y))

        print "New Dimensions: ", (img_w, img_h)

        # Warp the new image given the homography from the old image
        base_img_warp = cv2.warpPerspective(base_img_rgb, move_h, (img_w, img_h))
        print "Warped base image"

        # utils.showImage(base_img_warp, scale=(0.2, 0.2), timeout=5000)
        # cv2.destroyAllWindows()

        next_img_warp = cv2.warpPerspective(closestImage['rgb'], mod_inv_h, (img_w, img_h))
        print "Warped next image"

        # utils.showImage(next_img_warp, scale=(0.2, 0.2), timeout=5000)
        # cv2.destroyAllWindows()

        # Put the base image on an enlarged palette
        enlarged_base_img = np.zeros((img_h, img_w, 3), np.uint8)

        print "Enlarged Image Shape: ",  enlarged_base_img.shape
        print "Base Image Shape: ",      base_img_rgb.shape
        print "Base Image Warp Shape: ", base_img_warp.shape

        # enlarged_base_img[y:y+base_img_rgb.shape[0],x:x+base_img_rgb.shape[1]] = base_img_rgb
        # enlarged_base_img[:base_img_warp.shape[0],:base_img_warp.shape[1]] = base_img_warp

        # Create a mask from the warped image for constructing masked composite
        (ret,data_map) = cv2.threshold(cv2.cvtColor(next_img_warp, cv2.COLOR_BGR2GRAY), 0, 255, cv2.THRESH_BINARY)

        enlarged_base_img = cv2.add(enlarged_base_img, base_img_warp,
            mask=np.bitwise_not(data_map),
            dtype=cv2.CV_8U)

        # Now add the warped image
        final_img = cv2.add(enlarged_base_img, next_img_warp,
            dtype=cv2.CV_8U)

        # utils.showImage(final_img, scale=(0.2, 0.2), timeout=0)
        # cv2.destroyAllWindows()

        # Crop off the black edges
        final_gray = cv2.cvtColor(final_img, cv2.COLOR_BGR2GRAY)
        _, thresh = cv2.threshold(final_gray, 1, 255, cv2.THRESH_BINARY)
        contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        print "Found %d contours..." % (len(contours))

        max_area = 0
        best_rect = (0, 0, 0, 0)

        for cnt in contours:
            x, y, w, h = cv2.boundingRect(cnt)
            # print "Bounding Rectangle: ", (x,y,w,h)

            deltaHeight = h - y
            deltaWidth = w - x

            area = deltaHeight * deltaWidth

            if area > max_area and deltaHeight > 0 and deltaWidth > 0:
                max_area = area
                best_rect = (x, y, w, h)

        if ( max_area > 0 ):
            print "Maximum Contour: ", max_area
            print "Best Rectangle: ", best_rect

            final_img_crop = final_img[best_rect[1]:best_rect[1]+best_rect[3],
                    best_rect[0]:best_rect[0]+best_rect[2]]

            #utils.showImage(final_img_crop, scale=(0.2, 0.2), timeout=0)
            #cv2.destroyAllWindows()

            final_img = final_img_crop

        return stitchImages(final_img, new_images_array,  round + 1)

    else:
        return stitchImages(base_img_rgb, new_images_array, round + 1)
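
The helpers filter_matches, imageDistance and findDimensions called above are not included in this listing; a minimal Lowe-ratio sketch of what filter_matches might look like, returning the flat list of DMatch objects that the loop indexes with .trainIdx / .queryIdx:

# Hypothetical helper (not part of the original listing): keep the best match of
# each knnMatch pair only when it is clearly better than the second best.
def filter_matches(matches, ratio=0.75):
    filtered = []
    for pair in matches:
        if len(pair) == 2 and pair[0].distance < ratio * pair[1].distance:
            filtered.append(pair[0])
    return filtered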
Example #15
def train_and_test(scale, apply_pca, ncomp_pca, detector_options, SVM_options):
    # Main program, where data is read, features computed, the classifier fit,
    # and then applied to the test data.
    start = time.time()

    # read the train and test files
    train_images_filenames = cPickle.load(
        open('train_images_filenames.dat', 'r'))
    test_images_filenames = cPickle.load(open('test_images_filenames.dat',
                                              'r'))
    train_labels = cPickle.load(open('train_labels.dat', 'r'))
    test_labels = cPickle.load(open('test_labels.dat', 'r'))

    print 'Loaded ' + str(
        len(train_images_filenames
            )) + ' training images filenames with classes ', set(train_labels)
    print 'Loaded ' + str(
        len(test_images_filenames
            )) + ' testing images filenames with classes ', set(test_labels)

    # create the detector object
    if (detector_options.descriptor == 'SIFT'):
        detector = cv2.SIFT(detector_options.nfeatures)
    elif (detector_options.descriptor == 'SURF'):
        detector = cv2.SURF(detector_options.SURF_hessian_ths)
    elif (detector_options.descriptor == 'ORB'):
        detector = cv2.ORB(detector_options.nfeatures)
    else:
        print 'Error: feature detector not recognized.'

    # read the just 30 train images per class
    # extract SIFT keypoints and descriptors
    # store descriptors in a python list of numpy arrays
    Train_descriptors, Train_label_per_descriptor = \
        read_and_extract_features(train_images_filenames, train_labels, detector)

    # Transform everything to numpy arrays
    D = Train_descriptors[0]
    L = np.array([Train_label_per_descriptor[0]] *
                 Train_descriptors[0].shape[0])
    for i in range(1, len(Train_descriptors)):
        D = np.vstack((D, Train_descriptors[i]))
        L = np.hstack((L,
                       np.array([Train_label_per_descriptor[i]] *
                                Train_descriptors[i].shape[0])))

    # Scale input data, and keep the scaler for later:
    stdSlr = StandardScaler().fit(D)
    if (scale == 1):
        D = stdSlr.transform(D)

    # PCA:
    pca = PCA(n_components=ncomp_pca)
    if (apply_pca == 1):
        print "Applying principal components analysis..."
        pca.fit(D)
        D = pca.transform(D)
        print "Explained variance with ", ncomp_pca , \
            " components: ", sum(pca.explained_variance_ratio_) * 100, '%'

    # Train a linear SVM classifier
    clf = train_classifier(D, L, SVM_options)

    # get all the test data and predict their labels
    accuracy = test_system(test_images_filenames, test_labels, clf, detector, \
        stdSlr, pca, apply_pca, scale, SVM_options.probability)

    end = time.time()
    running_time = end - start
    print 'Done in ' + str(running_time) + ' secs.'

    # Return accuracy and time:
    return accuracy, running_time
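
The helpers read_and_extract_features, train_classifier and test_system are not part of this listing; a minimal sketch of what read_and_extract_features might do with the detector created above (ignoring the 30-images-per-class subsetting mentioned in the comment):

# Hypothetical helper (not part of the original listing): one descriptor matrix
# and one label per training image.
import cv2

def read_and_extract_features(image_filenames, labels, detector):
    descriptors = []
    label_per_descriptor = []
    for filename, label in zip(image_filenames, labels):
        image = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
        kpts, des = detector.detectAndCompute(image, None)
        if des is not None:
            descriptors.append(des)
            label_per_descriptor.append(label)
    return descriptors, label_per_descriptor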
import numpy as np
import cv2
import json
from pyflann import *
import re
from optparse import OptionParser
import time
from sklearn import linear_model
from matplotlib import pylab
import pickle
import seaborn as sns
import pandas as pd

global allBodyParts, surf
allBodyParts = ['MouthHook', 'LeftMHhook', 'RightMHhook', 'LeftDorsalOrgan', 'RightDorsalOrgan']
surf = cv2.SURF(250, nOctaves=2, nOctaveLayers=3, extended=1)

class KeyPoint:
    def __init__(self, frame_id, x, y, angle, rel_x, rel_y, bodypart, head_x, head_y):
        self.frame_id = frame_id
        self.pt = (x, y)
        self.angle = angle
        self.rel_pt = (rel_x, rel_y)
        self.bodypart = bodypart
        self.head_pt = (head_x, head_y)

class Error_Stats:
    def __init__(self):
        self.frame_file = None

def string_split(option, opt, value, parser):
Example #17
import cv2
img = cv2.imread('D:/zw.jpg')

gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)  
dst = cv2.convertScaleAbs(gray)
#cv2.imshow('dst',dst)
#cv2.imshow('dst',dst)


s = cv2.SURF(20)
keypoints = s.detect(gray)
for k in keypoints:
    cv2.circle(img,(int(k.pt[0]),int(k.pt[1])),1,(0,255,0),-1)
cv2.imshow('SURF_features',img)
cv2.waitKey()
cv2.destroyAllWindows()
import os
import cv2
import numpy
import math


def distance(x, y):
    return (x - y)**2


def diff(feature1, feature2):
    distance_list = map(distance, feature1, feature2)
    return math.sqrt(sum(distance_list))


surf = cv2.SURF(400)
feature_directory = "/run/shm/feature"
#object_name = "pringles/"
directory = "/home/skuba/skuba_athome/object_perception/learn/PicCut/"
centers = []
#for folder_name in os.listdir(directory):#+object_name):
#    for file_name in os.listdir(directory+folder_name+'/train/'):
#        print directory+folder_name+'/train/'+file_name
#        if file_name.endswith(".jpg") or file_name.endswith(".png"):
#            print directory+folder_name+'/train/'+file_name
#            #print  directory+folder_name+file_name
#            image = cv2.imread(directory+folder_name+'/train/'+file_name, 0)
file_name = "/home/skuba/.ros/features"
filePtr = open(file_name, "r")
feature_list = []
for line in filePtr:
    pass


cap = cv2.VideoCapture(0)
ret = cap.set(3, 960)
ret = cap.set(4, 720)
cv2.namedWindow('sketch', 0)
cv2.createTrackbar('threshold', 'sketch', 203, 3000, nothing)
#cv2.createTrackbar('flip', 'sketch', 500, 2000, nothing)
while (1):
    ret, img = cap.read()
    #im=cv2.imread('1.jpg')
    gray = cv2.cvtColor(img, 6)
    thrs1 = cv2.getTrackbarPos('threshold', 'sketch')
    #thrs2 = cv2.getTrackbarPos('flip', 'sketch')

    surf = cv2.SURF(thrs1)
    #detector = cv2.SIFT()
    #sift = cv2.SIFT()
    #surf.hessianThreshold = thrs2
    surf.upright = True
    kp, des = surf.detectAndCompute(gray, None)
    #keypoints = detector.detect(gray,None)
    #img = cv2.drawKeypoints(img,keypoints)
    #img = cv2.drawKeypoints(img,kp)
    img = cv2.drawKeypoints(img, kp, None, (255, 0, 0), 4)
    cv2.imshow('sketch', img)
    cv2.imshow('gray', gray)
    print len(kp)
    cv2.waitKey(100)
Example #20
def getSURFDetector():
    detector = cv2.SURF(hessianThreshold=400)
    return detector
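
cv2.SURF only exists in the old OpenCV 2.4 Python bindings; with OpenCV 3+ and the contrib modules installed, an equivalent construction (as used in Example #10 above) would be:

# OpenCV 3+ equivalent of the detector above (requires opencv-contrib).
import cv2

def getSURFDetector():
    detector = cv2.xfeatures2d.SURF_create(hessianThreshold=400)
    return detector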
Example #21
    # Show the image
    cv2.imshow('Matched Features', out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


# Read training and testing images
bear = cv2.imread('lab4_images/bear.jpg')
bear_test1 = cv2.imread('lab4_images/bear_test1.jpg')
bear_test2 = cv2.imread('lab4_images/bear_test2.jpg')
shoe = cv2.imread('lab4_images/shoe.jpg')
shoe_test1 = cv2.imread('lab4_images/shoe_test1.jpg')
shoe_test2 = cv2.imread('lab4_images/shoe_test2.jpg')

# SURF detector, outputs keypoints and descriptors
surf = cv2.SURF(10000)
kp_bear_surf, des_bear_surf = surf.detectAndCompute(bear, None)
kp_bear_test1_surf, des_bear_test1_surf = surf.detectAndCompute(
    bear_test1, None)
kp_bear_test2_surf, des_bear_test2_surf = surf.detectAndCompute(
    bear_test2, None)
kp_shoe_surf, des_shoe_surf = surf.detectAndCompute(shoe, None)
kp_shoe_test1_surf, des_shoe_test1_surf = surf.detectAndCompute(
    shoe_test1, None)
kp_shoe_test2_surf, des_shoe_test2_surf = surf.detectAndCompute(
    shoe_test2, None)

# ORB detector, outputs keypoints and descriptors
orb = cv2.ORB()
kp_bear_orb, des_bear_orb = orb.detectAndCompute(bear, None)
kp_bear_test1_orb, des_bear_test1_orb = orb.detectAndCompute(bear_test1, None)
Example #22
def SURFdetector(image):
    global SURF_THRESHOLD
    detector = cv2.SURF(SURF_THRESHOLD, 10, 10)
    keypoints, descriptors = detector.detectAndCompute(image, None)
    return keypoints, descriptors
Example #23
    if (options.train_annotation_file != ""):
        print "annotation_file:", options.train_annotation_file
        with open(options.train_annotation_file) as fin_annotation:
            train_annotation = json.load(fin_annotation)
    else:
        train_annotation = {}
        train_annotation["Annotations"] = []
        with open(options.train_annotation_list) as fin_annotation_list:
            for train_annotation_file in fin_annotation_list:
                train_annotation_file = os.path.join(options.project_dir,re.sub(".*/data/", "data/", train_annotation_file.strip()))
                with open(train_annotation_file) as fin_annotation:
                    tmp_train_annotation = json.load(fin_annotation)
                    train_annotation["Annotations"].extend(tmp_train_annotation["Annotations"])

    surf = cv2.SURF(int(options.hessianThreshold), nOctaves=int(options.nOctaves), nOctaveLayers=int(options.nOctaveLayers))
    class SaveClass:
        def __init__(self, votes, keypoints, descriptors, bodypart, hessianThreshold, nOctaves, nOctaveLayers):
            self.votes = votes
            self.keypoints = keypoints
            self.descriptors = descriptors
            self.bodypart = bodypart
            self.hessianThreshold = hessianThreshold
            self.nOctaves = nOctaves
            self.nOctaveLayers = nOctaveLayers

    bodypart_kp_train = []
    bodypart_desc_train = []
    bodypart_vote_train = []
    bodypart_kp_train_pos = []
    bodypart_desc_train_pos = []
Example #24
    def process(self):
        start_time = time.time()
        #if (len(sys.argv) == 2):
        #       fn = r"../resources/matlab/" + str(sys.argv[1])
        #else:
        #       fn = r"../resources/matlab/capturea.png"

        imscale = 2

        fn2 = r"../resources/matlab/capture2.png"
        fn3 = r"../resources/matlab/capture3.png"
        fn4 = r"../resources/matlab/process.png"

        fn_temp = r"../resources/matlab/out_temp.png"

        # convert RGB png to GRAY PNG
        # still keep 16-bit resolution
        #img_temp_1 = cv2.imread(fn,-1)
        #img_temp_gray = cv2.cvtColor(img_temp_1,cv2.COLOR_BGR2GRAY)
        #cv2.imwrite(fn_temp,plt.contour(img_temp_gray))

        #im = Image.open(fn).convert('L')
        #im2 = contour(im, origin='image')
        #im2.save(fn_temp)

        img = cv2.imread(self.fn)  #reads png as 8-bit
        #img = imgt[900:1200,1400:1500]
        #img = imgt
        #print img.dtype

        # if 8-bit files, which are the ones compatible with SURF
        maxIntensity = 255.0  # depends on dtype of image data
        percentContrast = 100.0 / maxIntensity

        gauss = NGA_Process_Gauss()

        gray2 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        scale_img = cv2.resize(gray2, (0, 0), fx=imscale, fy=imscale)

        surf = cv2.SURF(
            hessianThreshold=5)  #SIFT(edgeThreshold=2,contrastThreshold=0.005)
        kp = surf.detect(scale_img, None)

        self.out_d['kp'] = len(kp)
        self.out_d['kp_t'] = time.time() - start_time
        print "Keypoints Found: {0}".format(len(kp))
        print "Keypoint algorithm time (s): {0:.2f}".format(time.time() -
                                                            start_time)
        #gray_rgb = cv2.cvtColor(scale_img,cv2.COLOR_BGR2GRAY)
        cnt = 0
        cnt2 = 0
        sze = 8
        tt = None
        part_cnt = 0
        hist_bins = np.array([])
        hist_bins1 = np.array([])
        hist_bins2 = np.array([])
        cor_bin = np.array([])
        kp1 = np.array([])
        kp2 = np.array([])
        kp3 = np.array([])
        kp4 = np.array([])
        gauss.img = scale_img
        for xi in range(len(kp)):
            x = kp[xi].pt[1] / imscale
            y = kp[xi].pt[0] / imscale
            if cnt == 8e12:
                gauss_cor = gauss.fit(kp[xi], True)
            else:
                gauss_cor = gauss.fit(kp[xi], False)
            cor_bin = np.append(cor_bin, gauss_cor)
            #print gauss_cor
            t_kp = cv2.KeyPoint(y, x, kp[xi].size)
            kp1 = np.append(kp1, t_kp)
            if gauss_cor > 0.2:
                contrast = gauss.contrast(kp[xi]) * percentContrast
                #print contrast
                hist_bins = np.append(hist_bins, contrast)
                if ((contrast > 3) & (contrast < 12)):
                    part_cnt = part_cnt + 1
                    kp2 = np.append(kp2, t_kp)
                    hist_bins1 = np.append(hist_bins1, contrast)
                elif (contrast <= 2):
                    kp3 = np.append(kp3, t_kp)
                    hist_bins2 = np.append(hist_bins2, contrast)
                else:
                    kp4 = np.append(kp4, t_kp)
                    hist_bins2 = np.append(hist_bins2, contrast)

        self.out_d['particles'] = part_cnt
        self.out_d['particles_t'] = time.time() - start_time
        print "Particles Found: {0}".format(part_cnt)
        print "Particle Finding time (s): {0:.2f}".format(time.time() -
                                                          start_time)
        gauss.img = gray2

        dst3 = cv2.equalizeHist(gray2)

        # found virions
        img2b = cv2.drawKeypoints(dst3, kp2, color=(0, 128, 0))
        # under particles
        img2c = cv2.drawKeypoints(img2b, kp3, color=(0, 0, 128))
        # over particles
        img2 = cv2.drawKeypoints(img2c, kp4, color=(128, 0, 0))
        #img2b = cv2.resize(img2, (0,0), fx=(1.0/imscale), fy=(1.0/imscale))
        cv2.imwrite(fn2, img2)
        self.out_d['delta_t'] = time.time() - start_time
        print "entire run time (s): {0:.2f}".format(time.time() - start_time)

        img3 = cv2.drawKeypoints(gray2, kp1, color=(128, 0, 0))
        cv2.imwrite(fn3, img3)

        fig2 = plt.figure(num=None,
                          figsize=(12, 8),
                          dpi=80,
                          facecolor='w',
                          edgecolor='k')
        ax1 = plt.axes([0.05, 0.05, 0.55, 0.9])
        plt.imshow(img2)
        plt.title("Particles Found: {0}".format(part_cnt))

        # CONTRAST HISTOGRAMS
        ax2 = plt.axes([0.7, 0.05, 0.25, 0.25])

        plt.hist(hist_bins1, 10, alpha=0.75, color='green')
        plt.hold(True)
        plt.hist(hist_bins2, 50, alpha=0.75, color='red')
        ax2.set_xlim([-10, 25])
        mu = np.mean(hist_bins1)
        sigma = np.std(hist_bins1)
        ttl = r'$\mu: {mu:0.2f},   \sigma: {sigma:0.2f}$'.format(mu=mu,
                                                                 sigma=sigma)
        #ttl = r'$\mu=' + str(mu) + r', \sigma=' + str(sigma) + '$'
        plt.title(ttl)
        #plt.subplot(1,2,2)

        ## CORR HISTOGRAM

        cor_bin_f = cor_bin[np.logical_not(np.isnan(cor_bin))]
        ax3 = plt.axes([0.7, 0.4, 0.25, 0.25])
        #plt.hist(cor_bin,10,alpha=0.75,color='blue')
        hist, bins = np.histogram(cor_bin_f.ravel(), bins=256, density=True)
        bincenters = 0.5 * (bins[1:] + bins[:-1])
        plt.plot(bincenters, hist)
        mu = np.mean(cor_bin_f)
        sigma = np.std(cor_bin_f)
        ttl = r'$\mu: {mu:0.2f},   \sigma: {sigma:0.2f}$'.format(mu=mu,
                                                                 sigma=sigma)
        #ttl = r'$\mu=' + str(mu) + r', \sigma=' + str(sigma) + '$'
        plt.title(ttl)
        #plt.subplot(1,2,2)

        #plt.show()
        plt.savefig(fn4)

        return self.out_d
Example #25
def image_process(img_last_rgb, img_curr_rgb):
  global bb_x1, bb_y1, bb_x2, bb_y2
  global bb_x1_prev, bb_y1_prev, bb_x2_prev, bb_y2_prev
  global prev_bbx_x1, prev_bbx_y1, prev_bbx_x2, prev_bbx_y2
  global inputImg1, inputImg2, input_img_prev, input_img_curr
  global flag_predict_use_SURF, flag_predict_use_Dense
  global prev_left_pan,prev_left_tilt,prev_right_pan,prev_right_tilt

  flag_predict_use_SURF = False
  flag_predict_use_Dense = False
  flag_whole_imag_test = False
  # Get BBox ground truth from the ground-truth array
  print ('index', index)
  row = ground_truth_array[index]
  print ('bbox_gt:', row)

  if len(img_curr_rgb.shape) < 3:  
    inputImg1 = cv2.cvtColor(img_last_rgb, cv.CV_GRAY2RGB)
    inputImg2 = cv2.cvtColor(img_curr_rgb, cv.CV_GRAY2RGB)      
    input_img_prev = img_last_rgb.copy()
    input_img_curr = img_curr_rgb.copy()
  else:
    inputImg1 = img_last_rgb.copy()
    inputImg2 = img_curr_rgb.copy()
    input_img_prev = cv2.cvtColor(img_last_rgb, cv2.COLOR_BGR2GRAY)
    input_img_curr = cv2.cvtColor(img_curr_rgb, cv2.COLOR_BGR2GRAY)   




  if (flag_whole_imag_test == False):
    # Save All BBox file row to tmp variables
    tmp_x1 = int(row[0])
    tmp_y1 = int(row[1])
    tmp_x2 = int(row[2])
    tmp_y2 = int(row[3])
    tmp_x3 = int(row[4])
    tmp_y3 = int(row[5])
    tmp_x4 = int(row[6])
    tmp_y4 = int(row[7])
    


    print ('eight variables', tmp_x1, tmp_y1, tmp_x2, tmp_y2, tmp_x3, tmp_y3, tmp_x4, tmp_y4)
    # Select the top-left and bottom-right points,
    # due to the different format (sequence) of the bbox file
    min_x = min(tmp_x1, tmp_x2, tmp_x3, tmp_x4)
    min_y = min(tmp_y1, tmp_y2, tmp_y3, tmp_y4)
    max_x = max(tmp_x1, tmp_x2, tmp_x3, tmp_x4)
    max_y = max(tmp_y1, tmp_y2, tmp_y3, tmp_y4)
    print ('minX minY maxX maxY', min_x, min_y, max_x, max_y)
    bb_x1_gt = min_x
    bb_y1_gt = min_y
    bb_x2_gt = max_x
    bb_y2_gt = max_y
    width_gt = max_y - min_y
    height_gt = max_x - min_x    
  else:
    img_rows, img_cols = input_img_prev.shape
    bb_x1_gt = 1
    bb_y1_gt = 1
    bb_x2_gt = img_rows
    bb_y2_gt = img_cols
    width_gt = img_cols
    height_gt = img_rows
  print ('width_gt height_gt', width_gt, height_gt)
  print ('bb_x1_gt, bb_y1_gt, bb_x2_gt, bb_y2_gt', bb_x1_gt, bb_y1_gt, bb_x2_gt, bb_y2_gt)
  # Choose which bbox to use for the current frame
  if ((flag_predict_use_SURF == False) and (flag_predict_use_Dense == False)) or (index < 2):
    bb_x1 = bb_x1_gt
    bb_y1 = bb_y1_gt
    bb_x2 = bb_x2_gt
    bb_y2 = bb_y2_gt
    width = width_gt
    height = height_gt
  else:
    bb_x1 = prev_bbx_x1
    bb_y1 = prev_bbx_y1
    bb_x2 = prev_bbx_x2
    bb_y2 = prev_bbx_y2
    width = bb_y2 - bb_y1
    height = bb_x2 - bb_x1

  #print ('bb', bb_x1, bb_y1, bb_x2, bb_y2)

  

  img_curr_rgb_clone = img_curr_rgb.copy()
  cv2.rectangle(img_curr_rgb_clone, (bb_x1, bb_y1), (bb_x2, bb_y2), (0, 255, 0), 2)  # Draw ground truth bbx  
  
  # create a CLAHE object (Arguments are optional).
  clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
  cl1 = clahe.apply(input_img_prev)
  input_img_prev = cl1;

  clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
  cl2 = clahe.apply(input_img_curr)
  input_img_curr = cl2;

  # ------ Save BBox (x1, y1, x2, y2) with (h, w)
  img_rows, img_cols = input_img_prev.shape
  scale = 0.3
  bbox_x1 = int(max(0, bb_x1 - scale*height)) #refPt[0][1]
  bbox_x2 = int(min(bb_x2 + scale*height, img_cols)) #refPt[1][1]
  bbox_y1 = int(max(0, bb_y1 - scale*width))#refPt[0][0]
  bbox_y2 = int(min(bb_y2 + scale*width, img_rows)) #refPt[1][0]
  refPt = np.empty([2,2])
  refPt[0][1] = bbox_x1
  refPt[1][1] = bbox_x2
  refPt[0][0] = bbox_y1
  refPt[1][0] = bbox_y2
  # print bbox_x1, bbox_x2, bbox_y1, bbox_y2
  height = bbox_x2 - bbox_x1
  width = bbox_y2 - bbox_y1

  print ('bbox', bbox_x1, bbox_x2, bbox_y1, bbox_y2)
  print ('bbox_width*height', width, height)

  cv2.rectangle(img_curr_rgb_clone, (bbox_x1, bbox_y1), (bbox_x2, bbox_y2), (0, 0, 255), 2)
  str_temp = 'Ground Truth'
  cv2.putText(img_curr_rgb_clone,str_temp,(10,30), font, 0.5,(0,255,0),2)
  str_temp = '| BBox Extend'
  cv2.putText(img_curr_rgb_clone,str_temp,(130,30), font, 0.5,(0,0,255),2)
  cv2.namedWindow('Ground Truth', cv2.WINDOW_AUTOSIZE)
  total_frame = len(ground_truth_array);
  current_frame_str = 'Frame: '
  current_frame_str += str(index+1)
  current_frame_str += ' / '
  current_frame_str += str(total_frame);
  print ('img_rows', img_rows)
  cv2.putText(img_curr_rgb_clone,current_frame_str,(10, int(img_rows - 20)), font, 0.5,(255,255,255),2)
  cv2.imshow('Ground Truth',img_curr_rgb_clone)
  cv2.moveWindow('Ground Truth', 100, 100)
  #cv2.waitKey(0)
  #print bbox_x1, bbox_y1, bbox_x2, bbox_y2, height, width
  input_img_prev = input_img_prev[bbox_y1:bbox_y2, bbox_x1:bbox_x2]
  input_img_curr = input_img_curr[bbox_y1:bbox_y2, bbox_x1:bbox_x2]
  
  #cv2.namedWindow('input_img_prev', cv2.WINDOW_AUTOSIZE)
  #cv2.imshow('input_img_prev',input_img_prev) 

  #cv2.namedWindow('input_img_curr', cv2.WINDOW_AUTOSIZE)
  #cv2.imshow('input_img_curr',input_img_curr)

  #print ('input_img_prev', input_img_prev.shape)

  # ----- Detect ROI Edge ------- #
  # gray = cv2.cvtColor(input_img_prev,cv2.COLOR_BGR2GRAY)
  img_gray_for_edge = input_img_prev  
  img_gray_for_edge = cv2.equalizeHist(img_gray_for_edge)
  edges = cv2.Canny(img_gray_for_edge,50,150,apertureSize = 3)
  minLineLength = 10  
  maxLineGap = 10
  lines = cv2.HoughLinesP(edges,1,np.pi/180,100,minLineLength,maxLineGap)
  if lines is not None:
    for x1,y1,x2,y2 in lines[0]:
        cv2.line(img_gray_for_edge,(x1,y1),(x2,y2),(0,255,0),2)
          
  cv2.namedWindow('Edge', cv2.WINDOW_AUTOSIZE)
  cv2.imshow('Edge',img_gray_for_edge)
  # ----- Detect ROI Edge ------- #

  #------------------- PTU ----------------
  curr_left_pan = float(row[8])
  curr_left_tilt = float(row[9])
  curr_right_pan = float(row[10])
  curr_right_tilt = float(row[11])
  print('curr',curr_left_pan,curr_left_tilt,curr_right_pan,curr_right_tilt)
  print('prev',prev_left_pan,prev_left_tilt,prev_right_pan,prev_right_tilt)
  delta_left_pan =  (curr_left_pan - prev_left_pan)*3.1415926/180
  # delta_left_pan = 0
  delta_left_tilt = -(curr_left_tilt - prev_left_tilt)*3.1415926/180
  # delta_left_tilt = 0
  delta_right_pan = (curr_right_pan - prev_right_pan)

  focal_length = 9000  #  7894 pixel = 300 mm ; 38um = 1 pixel
  if index > 1:    
    curr_center_x = bb_x1 + (bb_x2 - bb_x1)/2;
    curr_center_y = bb_y1 + (bb_y2 - bb_y1)/2;
    prev_center_x = bb_x1_prev + (bb_x2_prev - bb_x1_prev)/2;
    prev_center_y = bb_y1_prev + (bb_y2_prev - bb_y1_prev)/2;
    # overlay = img_curr_rgb.copy()
    cv2.namedWindow('Draw_center', cv2.WINDOW_AUTOSIZE)
    cv2.circle(img_curr_rgb_clone, (curr_center_x, curr_center_y), 2, (0, 255, 0), -1)
    cv2.circle(img_curr_rgb_clone, (prev_center_x, prev_center_y), 2, (0, 255, 255), -1)
    cv2.imshow('Draw_center',img_curr_rgb_clone)
    # -- for left PTU
    center_x_t = focal_length * (prev_center_x - focal_length*delta_left_pan) / (prev_center_x * delta_left_pan - delta_left_tilt * prev_center_y + focal_length)
    # center_x_t = prev_center_x - focal_length * delta_left_pan;
    center_y_t = focal_length * (prev_center_y - focal_length*delta_left_tilt) / (prev_center_x * delta_left_pan - delta_left_tilt * prev_center_y + focal_length)
    # center_y_t = prev_center_y - focal_length * delta_left_tilt;

    center_x_t_1 = focal_length * (curr_center_x + focal_length*delta_left_pan) / (-curr_center_x * delta_left_pan + (-delta_left_tilt) * curr_center_y + focal_length)

    center_y_t_1 = focal_length * (curr_center_y - focal_length*(-delta_left_tilt)) / (-curr_center_x * delta_left_pan + (-delta_left_tilt) * curr_center_y + focal_length)    


    print('GT_Center', curr_center_x, curr_center_y)
    print('Predict_Center', center_x_t, center_y_t)

    str_temp = 'Pan/Tilt Curr '
    str_temp += str(curr_left_pan)
    str_temp += ' | '
    str_temp += str(curr_left_tilt)
    cv2.putText(img_curr_rgb_clone,str_temp,(10,50), font, 0.5,(255,255,255),2)

    str_temp = 'Pan/Tilt Prev '
    str_temp += str(prev_left_pan)
    str_temp += ' | '
    str_temp += str(prev_left_tilt)
    cv2.putText(img_curr_rgb_clone,str_temp,(10,70), font, 0.5,(255,255,255),2)

    str_temp = 'Pan/Tilt Delta '
    str_temp += str(delta_left_pan)
    str_temp += ' | '
    str_temp += str(delta_left_tilt)
    cv2.putText(img_curr_rgb_clone,str_temp,(10,90), font, 0.5,(255,255,255),2)

    str_temp = 'Curr Center:  '
    str_temp += str(curr_center_x)
    str_temp += ' | '
    str_temp += str(curr_center_y)
    cv2.putText(img_curr_rgb_clone,str_temp,(10,110), font, 0.5,(0, 255, 0),2)

    str_temp = 'Prev Center:  '
    str_temp += str(prev_center_x)
    str_temp += ' | '
    str_temp += str(prev_center_y)
    cv2.putText(img_curr_rgb_clone,str_temp,(10,130), font, 0.5,(0, 255, 255),2)

    str_temp = 'Pred Center:  '
    str_temp += str(center_x_t)
    str_temp += ' | '
    str_temp += str(center_y_t)
    cv2.putText(img_curr_rgb_clone,str_temp,(10,150), font, 0.5,(255, 0, 0), 2)    
    cv2.circle(img_curr_rgb_clone, (int(center_x_t), int(center_y_t)), 2, (255, 0, 0), -1)

    str_temp = 'Prev Center from Current:  '
    str_temp += str(center_x_t_1)
    str_temp += ' | '
    str_temp += str(center_y_t_1)
    cv2.putText(img_curr_rgb_clone,str_temp,(10,170), font, 0.5,(125, 125, 0), 2)    
    cv2.circle(img_curr_rgb_clone, (int(center_x_t_1), int(center_y_t_1)), 2, (125, 125, 0), -1)

    #img_last_rgb
    h1, w1 = img_last_rgb.shape[:2]
    h2, w2 = img_curr_rgb_clone.shape[:2]
    #create empty matrix
    vis = np.zeros((max(h1, h2), w1+w2,3), np.uint8)

    #combine 2 images
    vis[:h1, :w1,:3] = img_last_rgb
    vis[:h2, w1:w1+w2,:3] = img_curr_rgb_clone
    # add frame text to combined img
    total_frame = len(ground_truth_array);
    current_frame_str = 'Frame: '
    current_frame_str += str(index)
    current_frame_str += ' / '
    current_frame_str += str(total_frame);
    print ('img_rows', img_rows)
    cv2.putText(vis,current_frame_str,(10, int(img_rows - 20)), font, 0.5,(255,255,255),2)
    cv2.imshow('Draw_center',vis)
  #----------------- PTU --------------------

  # --------------------  SURF Points --------------- #
  surf = cv2.SURF(20)
  e1 = cv2.getTickCount()
  kp, des = surf.detectAndCompute(input_img_prev,None)
  e2 = cv2.getTickCount()
  time = (e2 - e1)/ cv2.getTickFrequency()
  print ('kp', len(kp))
  
  img_curr_rgb_predict = img_curr_rgb.copy()
  if (len(kp) > 0):
    img_prev_ROI_RGB = inputImg1[bbox_y1:bbox_y2, bbox_x1:bbox_x2]
    input_img_prev_surf = cv2.drawKeypoints(img_prev_ROI_RGB,kp,None,(255,0,0),4)
    cv2.namedWindow('SURF Points', cv2.WINDOW_AUTOSIZE)
    img_prev_RGB = inputImg1.copy()
    str_temp = 'SURF Points in ROI: '
    str_temp += str(len(kp))
    str_temp += ' | Time: '
    str_temp += str(time)[:5]
    str_temp += ' s'
    cv2.putText(img_prev_RGB,str_temp,(10,30), font, 0.5,(255,255,255),2)
    img_prev_RGB[bbox_y1:bbox_y2, bbox_x1:bbox_x2] = input_img_prev_surf

    overlay = img_prev_RGB.copy()
    #cv2.circle(overlay, (166, 132), 12, (255, 0, 0), -1)
    cv2.rectangle(overlay, (bbox_x1, bbox_y1), (bbox_x2, bbox_y2), (0, 0, 255), -1)
    opacity = 0.2 
    cv2.addWeighted(overlay, opacity, img_prev_RGB, 1 - opacity, 0, img_prev_RGB)  
    cv2.imshow('SURF Points',img_prev_RGB) 

    # Collect the SURF keypoint coordinates into an (N, 1, 2) float32 array for optical flow
    pt2_narray = np.float32([each_point.pt for each_point in kp]).reshape(-1, 1, 2)

    # Parameters for lucas kanade optical flow
    lk_params = dict( winSize  = (15,15),
                      maxLevel = 2,
                      criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    # params for Shi-Tomasi corner detection (defined for reference; not used below,
    # since the SURF keypoints above are the points being tracked)
    feature_params = dict( maxCorners = 100,
                         qualityLevel = 0.3,
                         minDistance = 7,
                         blockSize = 7 )
    p0 = pt2_narray
    p1, status, err = cv2.calcOpticalFlowPyrLK(input_img_prev, input_img_curr, p0, None, **lk_params)
    # print ('p1_type', type(p1))
    # print ('p1_size', p1.shape)  # print ('p1', p1)

    p0r, status, err = cv2.calcOpticalFlowPyrLK(input_img_curr, input_img_prev, p1, None, **lk_params)
    # print ('p0r_type', type(p1))
    # print ('p0r_size', p1.shape)

    p0_array = squeeze_pts(p0)
    p0r_array = squeeze_pts(p0r)
    #p0r_array = np.array([p0r.reshape(-1, 2)]) #.reshape(-1, 2)
    #print p0r
    fb_err = np.sqrt(np.power(p0_array - p0r_array, 2).sum(axis=1))
    # print ('fb_err.shape', fb_err.shape)  
    # print ('fb_err', fb_err)

    good = fb_err < 1
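    # Forward-backward consistency check: p0 is tracked forward to p1, p1 is tracked back to
    # p0r, and a point is kept only if it returns to within 1 px of where it started. As a
    # standalone helper it would look roughly like this (a sketch, not part of the original):
    # def fb_filter(prev_img, curr_img, pts, thresh=1.0):
    #     fwd, st, _ = cv2.calcOpticalFlowPyrLK(prev_img, curr_img, pts, None, **lk_params)
    #     bwd, st2, _ = cv2.calcOpticalFlowPyrLK(curr_img, prev_img, fwd, None, **lk_params)
    #     err = np.sqrt(((pts - bwd).reshape(-1, 2) ** 2).sum(axis=1))
    #     return fwd, err < thresh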
    # print (good)
    
    #d = abs(p0-p0r).reshape(-1, 2).max(-1)
    #good = d < 1  
    #print ('d.shape', d.shape)  
    #print ('d', d)
    

    # print (good)
    status_mask = status.ravel() == 1
    good_curr_points, good_prev_points = p1[status_mask], p0[status_mask]
    good = good[status_mask]  # keep the fb-error flags aligned with the status-filtered points
    # Create some random colors
    

    img_prev_ROI_RGB = inputImg1[bbox_y1:bbox_y2, bbox_x1:bbox_x2]
    img_prev_ROI_RGB_all_points = img_prev_ROI_RGB.copy()

    img_curr_ROI_RGB = inputImg2[bbox_y1:bbox_y2, bbox_x1:bbox_x2]
    img_curr_ROI_RGB_all_points = img_curr_ROI_RGB.copy()

    vis3_good = img_curr_ROI_RGB.copy()
    # vis4_all = inputImg2.copy()
    vis4_all = cv2.addWeighted(img_prev_ROI_RGB,0.5,img_curr_ROI_RGB,0.5,1)
    #print ('p1', p1)
    opacity = 0.6

    # ---------- Prediction BBox by SURF points-------
    predict_bbox_x1 = img_cols
    predict_bbox_y1 = img_rows
    predict_bbox_x2 = 0
    predict_bbox_y2 = 0
      
    for i, (new, old, good_flag) in enumerate(zip(good_curr_points, good_prev_points, good)):
        # cv2 drawing functions expect integer pixel coordinates
        a, b = [int(v) for v in new.ravel()]
        c, d = [int(v) for v in old.ravel()]
        # draw the current point, faded into the accumulated overlay
        img_curr_ROI_RGB_all_points_clone = img_curr_ROI_RGB_all_points.copy()
        cv2.circle(img_curr_ROI_RGB_all_points_clone, (a, b), 3, color[i].tolist(), -1)
        cv2.addWeighted(img_curr_ROI_RGB_all_points_clone, opacity, img_curr_ROI_RGB_all_points, 1 - opacity, 0, img_curr_ROI_RGB_all_points)
        # draw the previous point and the match line on the blended view of both frames
        cv2.circle(img_prev_ROI_RGB_all_points, (c, d), 3, color[i].tolist(), 1)
        cv2.line(vis4_all, (a, b), (c, d), color[i].tolist(), 2)
        if not good_flag:
            continue
        # only forward-backward-consistent points are drawn here and grow the predicted bbox
        cv2.line(vis3_good, (a, b), (c, d), color[i].tolist(), 2)
        if predict_bbox_x1 > a: predict_bbox_x1 = a
        if predict_bbox_x2 < a: predict_bbox_x2 = a
        if predict_bbox_y1 > b: predict_bbox_y1 = b
        if predict_bbox_y2 < b: predict_bbox_y2 = b
        
   
    

    surf_bbox_scale = 0.5
    surf_bbox_width = predict_bbox_x2 - predict_bbox_x1
    surf_bbox_height = predict_bbox_y2 - predict_bbox_y1
    predict_bbox_surf_x1 = int((predict_bbox_x1 + bbox_x1) - surf_bbox_scale * surf_bbox_width)
    predict_bbox_surf_y1 = int((predict_bbox_y1 + bbox_y1) - surf_bbox_scale * surf_bbox_height)
    predict_bbox_surf_x2 = int((predict_bbox_x2 + bbox_x1) + surf_bbox_scale * surf_bbox_width)
    predict_bbox_surf_y2 = int((predict_bbox_y2 + bbox_y1) + surf_bbox_scale * surf_bbox_height)
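    # The SURF-predicted box is the tight extent of the forward-backward-consistent points,
    # shifted from ROI coordinates back to image coordinates (+ bbox_x1 / + bbox_y1) and then
    # padded on every side by surf_bbox_scale (50%) of the box's own extent.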
    


    # Paste the annotated previous ROI (with tracked points) back into a copy of the previous image
    img_prev_RGB_all_points = inputImg1.copy()
    img_prev_RGB_all_points[bbox_y1:bbox_y2, bbox_x1:bbox_x2] = img_prev_ROI_RGB_all_points

    cv2.namedWindow('Previous Image All Points', cv2.WINDOW_AUTOSIZE)
    str_temp = 'Prev. Img. | Good Points: '
    str_temp += str(len(good_prev_points))
    cv2.putText(img_prev_RGB_all_points,str_temp,(10,30), font, 0.5,(255,255,255),2)
    cv2.imshow('Previous Image All Points',img_prev_RGB_all_points) 

    # Paste the annotated current ROI (with tracked points) back into a copy of the current image
    img_curr_RGB_all_points = inputImg2.copy()
    img_curr_RGB_all_points[bbox_y1:bbox_y2, bbox_x1:bbox_x2] = img_curr_ROI_RGB_all_points
    cv2.namedWindow('Current Image All Points', cv2.WINDOW_AUTOSIZE)
    str_temp = 'Curr. Img. | Good Points: '
    str_temp += str(len(good_curr_points))
    cv2.putText(img_curr_RGB_all_points,str_temp,(10,30), font, 0.5,(255,255,255),2)
    cv2.imshow('Current Image All Points',img_curr_RGB_all_points)

    inputImg2_good_points_RGB = inputImg2.copy()
    inputImg2_good_points_RGB[bbox_y1:bbox_y2, bbox_x1:bbox_x2] = vis3_good
    cv2.namedWindow('Matched Good Points', cv2.WINDOW_AUTOSIZE)
    str_temp = 'Matched Good Points: '
    str_temp += str(sum(good))
    cv2.putText(inputImg2_good_points_RGB,str_temp,(10,30), font, 0.5,(255,255,255),2)
    cv2.imshow('Matched Good Points',inputImg2_good_points_RGB) 

    inputImg2_all_points_RGB = inputImg2.copy()
    inputImg2_all_points_RGB[bbox_y1:bbox_y2, bbox_x1:bbox_x2] = vis4_all
    cv2.namedWindow('All Matched Points', cv2.WINDOW_AUTOSIZE)
    str_temp = 'Matched All Points: '
    str_temp += str(len(good_curr_points))
    cv2.putText(inputImg2_all_points_RGB,str_temp,(10,30), font, 0.5,(255,255,255),2)
    cv2.imshow('All Matched Points',inputImg2_all_points_RGB)


  # ---------------- Dense Flow -------------------- #
  e1 = cv2.getTickCount()
  flow = cv2.calcOpticalFlowFarneback(input_img_prev,input_img_curr,0.5,1,3,15,3,5,1)
  e2 = cv2.getTickCount()
  time = (e2 - e1)/ cv2.getTickFrequency()

   # print ('flow', flow)
  print ('flow_size', flow.shape)
  mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
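  # cartToPolar converts the per-pixel (dx, dy) flow into magnitude and angle; the magnitude is
  # used below to pick the strongest-moving pixels, and both feed the HSV visualization.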

  # Select num_largest points by dense flow method
  img_prev_ROI_RGB = inputImg1[bbox_y1:bbox_y2, bbox_x1:bbox_x2]
  dens_points_gray = np.zeros_like(img_prev_ROI_RGB)
  print ('mag size', mag.size)
  num_largest = min(450, mag.size - 1)  # cap below the pixel count so argpartition's kth index stays valid
  indices = (-mag).argpartition(num_largest, axis=None)[:num_largest]
  x, y = np.unravel_index(indices, mag.shape)
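  # argpartition returns the flat indices of the num_largest biggest magnitudes without a full
  # sort; unravel_index turns them back into (row, col) positions inside the ROI. A fully sorted
  # equivalent (slower, shown only for comparison) would be:
  # indices = np.argsort(-mag, axis=None)[:num_largest]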
  #print ('x=', x)
  #print ('y=', y)

  # ------------------ Predict BBox by Dense points -------------------
  predict_bbox_x1 = img_cols
  predict_bbox_y1 = img_rows
  predict_bbox_x2 = 0
  predict_bbox_y2 = 0
  for i in range(0, num_largest):
    cv2.circle(dens_points_gray, (y[i], x[i]), 3, color[i].tolist(), 1)
    if (predict_bbox_x1 > y[i]): predict_bbox_x1 = y[i]
    if (predict_bbox_x2 < y[i]): predict_bbox_x2 = y[i]
    if (predict_bbox_y1 > x[i]): predict_bbox_y1 = x[i]
    if (predict_bbox_y2 < x[i]): predict_bbox_y2 = x[i]

  dense_bbox_scale = 0.5
  dense_bbox_width = predict_bbox_x2 - predict_bbox_x1
  dense_bbox_height = predict_bbox_y2 - predict_bbox_y1
  predict_bbox_dense_x1 = int((predict_bbox_x1 + bbox_x1) - dense_bbox_scale * dense_bbox_width)
  predict_bbox_dense_y1 = int((predict_bbox_y1 + bbox_y1) - dense_bbox_scale * dense_bbox_height)
  predict_bbox_dense_x2 = int((predict_bbox_x2 + bbox_x1) + dense_bbox_scale * dense_bbox_width)
  predict_bbox_dense_y2 = int((predict_bbox_y2 + bbox_y1) + dense_bbox_scale * dense_bbox_height)
  
  # Draw all BBox
  str_temp = 'Ground Truth'
  cv2.putText(img_curr_rgb_predict,str_temp,(10,30), font, 0.5,(0,255,0),2)
  str_temp = '| BBox Extend'
  cv2.putText(img_curr_rgb_predict,str_temp,(130,30), font, 0.5,(0,0,255),2)
  str_temp = '| SURF Predict'
  cv2.putText(img_curr_rgb_predict,str_temp,(250,30), font, 0.5,(255,0,0),2)
  str_temp = '| Dense Predict'
  cv2.putText(img_curr_rgb_predict,str_temp,(370,30), font, 0.5,(255,255,0),2)
  if (len(kp) > 0): 
    cv2.rectangle(img_curr_rgb_predict, (predict_bbox_surf_x1, predict_bbox_surf_y1), (predict_bbox_surf_x2, predict_bbox_surf_y2), (255, 0, 0), 2)
  cv2.rectangle(img_curr_rgb_predict, (bbox_x1, bbox_y1), (bbox_x2, bbox_y2), (0, 0, 255), 2)
  cv2.rectangle(img_curr_rgb_predict, (bb_x1, bb_y1), (bb_x2, bb_y2), (0, 255, 0), 2)  # Draw ground truth bbox
  cv2.rectangle(img_curr_rgb_predict, (predict_bbox_dense_x1, predict_bbox_dense_y1), (predict_bbox_dense_x2, predict_bbox_dense_y2), (255, 255, 0), 2)
  cv2.namedWindow('img_curr_rgb_predict', cv2.WINDOW_AUTOSIZE)
  cv2.imshow('img_curr_rgb_predict',img_curr_rgb_predict) 

  # Draw Dense Flow selected points
  img1_original_rgb = np.zeros_like(inputImg1)
  img1_original_rgb[bbox_y1:bbox_y2, bbox_x1:bbox_x2] = dens_points_gray
  cv2.namedWindow('Dense Optical Flow', cv2.WINDOW_AUTOSIZE)
  str_temp = 'ROI Dense Optical Flow Selected '
  str_temp += str(num_largest)
  str_temp += ' Points'
  cv2.putText(img1_original_rgb,str_temp,(10,30), font, 0.5,(255,255,255),2)
  
  cv2.imshow('Dense Optical Flow',img1_original_rgb)

  # Draw HSV Flow Code
  img_prev_ROI_RGB = inputImg1[bbox_y1:bbox_y2, bbox_x1:bbox_x2]
  hsv = np.zeros_like(img_prev_ROI_RGB)
  hsv[...,1] = 255
  hsv[...,0] = ang*180/np.pi/2
  hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
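  # Standard dense-flow color coding: hue encodes flow direction (OpenCV hue spans 0-180, so the
  # angle in radians is scaled by 180/pi/2), value encodes the normalized magnitude, and
  # saturation is held at 255.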

  # print ('hsv_size', hsv.shape)
  # print ('hsv_type', type(hsv))
  # print ('inputImg1', inputImg1.shape)
  # print ('inputImg1_type', type(inputImg1))
   
  rgb = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)
  img1_original_rgb = np.zeros_like(inputImg1)
  img1_original_rgb[bbox_y1:bbox_y2, bbox_x1:bbox_x2] = rgb
  cv2.namedWindow('Optical Flow HSV', cv2.WINDOW_AUTOSIZE)
  str_temp = 'ROI Dense Optical Flow'
  str_temp += ' | Time: '
  str_temp += str(time)[:5]
  str_temp += ' s'
  cv2.putText(img1_original_rgb,str_temp,(10,30), font, 0.5,(255,255,255),2)
  cv2.imshow('Optical Flow HSV',img1_original_rgb)


  bbx_scale_damping = 0.8
  # Smooth the predicted bbox with exponential damping and carry it to the next frame
  if (flag_predict_use_SURF == True) and (index > 1):
    prev_bbx_x1 = int(prev_bbx_x1 * bbx_scale_damping + (1 - bbx_scale_damping) * predict_bbox_surf_x1)
    prev_bbx_y1 = int(prev_bbx_y1 * bbx_scale_damping + (1 - bbx_scale_damping) * predict_bbox_surf_y1)
    prev_bbx_x2 = int(prev_bbx_x2 * bbx_scale_damping + (1 - bbx_scale_damping) * predict_bbox_surf_x2)
    prev_bbx_y2 = int(prev_bbx_y2 * bbx_scale_damping + (1 - bbx_scale_damping) * predict_bbox_surf_y2)
  elif (flag_predict_use_SURF == True) and (index == 1):
    prev_bbx_x1 = predict_bbox_surf_x1
    prev_bbx_y1 = predict_bbox_surf_y1
    prev_bbx_x2 = predict_bbox_surf_x2
    prev_bbx_y2 = predict_bbox_surf_y2
  elif (flag_predict_use_Dense == True) and (index > 1):
    prev_bbx_x1 = prev_bbx_x1 * bbx_scale_damping + (1 - bbx_scale_damping) * predict_bbox_dense_x1
    prev_bbx_y1 = prev_bbx_y1 * bbx_scale_damping + (1 - bbx_scale_damping) * predict_bbox_dense_y1
    prev_bbx_x2 = prev_bbx_x2 * bbx_scale_damping + (1 - bbx_scale_damping) * predict_bbox_dense_x2
    prev_bbx_y2 = prev_bbx_y2 * bbx_scale_damping + (1 - bbx_scale_damping) * predict_bbox_dense_y2
  elif (flag_predict_use_Dense == True) and (index == 1):
    prev_bbx_x1 = predict_bbox_dense_x1
    prev_bbx_y1 = predict_bbox_dense_y1
    prev_bbx_x2 = predict_bbox_dense_x2
    prev_bbx_y2 = predict_bbox_dense_y2
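  # With damping 0.8 each coordinate is updated as new = 0.8 * previous + 0.2 * prediction;
  # on the first frame (index == 1) it is simply initialized from the prediction.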


  # --- Test Morphological Filter -------------#
  kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(5,5))

  prev_img_gray = cv2.cvtColor(inputImg1, cv2.COLOR_BGR2GRAY)
  curr_img_gray = cv2.cvtColor(inputImg2, cv2.COLOR_BGR2GRAY)
  # print ('kernel', kernel)

  e1_closing = cv2.getTickCount()
  img_closing = cv2.morphologyEx(curr_img_gray, cv2.MORPH_CLOSE, kernel)
  e2_closing = cv2.getTickCount()
  time_closing = (e2_closing - e1_closing)/ cv2.getTickFrequency()

  e1_opening = cv2.getTickCount()
  img_opening = cv2.morphologyEx(curr_img_gray, cv2.MORPH_OPEN, kernel)  
  e2_opening = cv2.getTickCount()
  time_opening = (e2_opening - e1_opening)/ cv2.getTickFrequency()

  img_minus = img_closing - img_opening
  time_minus = (e2_opening - e1_closing)/ cv2.getTickFrequency()  # total time spanning both operations
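  # Note: closing - opening equals the sum of the top-hat (img - opening) and black-hat
  # (closing - img) transforms, so it highlights small bright and dark details at the kernel scale.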

  str_temp = 'Closing Operation' 
  str_temp += ' | Time: '
  str_temp += str(time_closing)[:5]
  str_temp += ' s' 
  cv2.putText(img_closing,str_temp,(10,30), font, 0.5,(255,255,255),2)
  cv2.putText(img_closing,current_frame_str,(10, int(img_rows - 20)), font, 0.5,(255,255,255),2)
  cv2.namedWindow('Current Closing', cv2.WINDOW_AUTOSIZE)
  cv2.imshow('Current Closing',img_closing) 

  str_temp = 'Opening Operation'
  str_temp += ' | Time: '
  str_temp += str(time_opening)[:5]
  str_temp += ' s' 
  cv2.putText(img_opening,str_temp,(10,30), font, 0.5,(255,255,255),2)
  cv2.namedWindow('Current Open', cv2.WINDOW_AUTOSIZE)
  cv2.putText(img_opening,current_frame_str,(10, int(img_rows - 20)), font, 0.5,(255,255,255),2)
  cv2.imshow('Current Open',img_opening) 


  str_temp = 'Combined Morphological Filtering (Close - Open)'
  str_temp += ' | Time: '
  str_temp += str(time_minus)[:5]
  str_temp += ' s' 
  cv2.namedWindow('Current Close - Open', cv2.WINDOW_AUTOSIZE)
  cv2.putText(img_minus,str_temp,(10,30), font, 0.5,(255,255,255),2)
  cv2.putText(img_minus,current_frame_str,(10, int(img_rows - 20)), font, 0.5,(255,255,255),2)
  cv2.imshow('Current Close - Open',img_minus)   


  #abs_diff_img = prev_img_gray.copy()
  abs_diff_img = cv2.absdiff(curr_img_gray, prev_img_gray)
  str_temp = 'AbsDiff (current - previous frame)'
  cv2.putText(abs_diff_img,str_temp,(10,30), font, 0.5,(255,255,255),2)
  cv2.namedWindow('Current Difference', cv2.WINDOW_AUTOSIZE)
  cv2.imshow('Current Difference',abs_diff_img)

  adaptive_img = cv2.adaptiveThreshold(img_minus,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,5,2)
  cv2.namedWindow('Adaptive Threshold', cv2.WINDOW_AUTOSIZE)
  cv2.imshow('Adaptive Threshold',adaptive_img)
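  # adaptiveThreshold with ADAPTIVE_THRESH_MEAN_C compares each pixel against the mean of its
  # 5x5 neighbourhood; with THRESH_BINARY_INV, pixels at or below that local mean (minus C=2)
  # become white (255) and everything else becomes black.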
  # param_w = 2
  # windos_size = 2*param_w + 1;
  # img_rows, img_cols = curr_img_gray.shape
  # filter_img = curr_img_gray.copy()
  
  # print ('row, cols',img_rows, img_cols)

  # for row_ind in range(int(math.floor(windos_size)), img_rows - int(math.floor(windos_size))):
  #   for column_ind in range(int(math.floor(windos_size)), img_cols - int(math.floor(windos_size))):
  #     max_1 = 0;      
  #     curr_pixel = curr_img_gray[row_ind, column_ind]      
  #     for max_i in range(-param_w, param_w):
  #       min_1 = 999;
  #       for min_j in range(-param_w, param_w):          
  #          if curr_img_gray[row_ind + max_i + min_j, column_ind] < min_1:
  #           min_1 = curr_img_gray[row_ind + max_i + min_j, column_ind]
  #       if min_1 > max_1:
  #         max_1 = min_1

  #     max_2 = 0;
  #     for max_i in range(-param_w, param_w):
  #       min_2 = 999;
  #       for min_j in range(-param_w, param_w):
  #         if curr_img_gray[row_ind + max_i + min_j, column_ind] < min_2:
  #           min_2 = curr_img_gray[row_ind + max_i + min_j, column_ind]
  #       if min_2 > max_2:
  #         max_2 = min_2
  #   curr_img_gray[row_ind, column_ind] = curr_pixel - max(max_1, max_2)

  # cv2.namedWindow('Current positive filter', cv2.WINDOW_AUTOSIZE)
  # cv2.imshow('Current positive filter',curr_img_gray)  

        # crop_x = column_ind - math.floor(windos_size/2);
        # crop_y = row_ind - math.floor(windos_size/2);
        # crop_img = curr_img_gray[crop_y:crop_y+windos_size-1, crop_x:crop_x+windos_size-1]
  prev_left_pan = curr_left_pan
  prev_left_tilt = curr_left_tilt
  prev_right_pan = curr_right_pan
  prev_right_tilt = curr_right_tilt
  bb_x1_prev = bb_x1
  bb_y1_prev = bb_y1
  bb_x2_prev = bb_x2
  bb_y2_prev = bb_y2
Exemple #26
0
def calc_surf1(self):
    self.surf = cv2.SURF(self.hessian_threshold, self.n_octaves)
    self.key_points1, self.features1 = self.surf.detectAndCompute(self.img_gray, None)
    self.key_positions1 = np.array([kp.pt for kp in self.key_points1])
Exemple #27
0
cv2.waitKey()

#SIFT feature detector and descriptor
sift = cv2.SIFT()

keypoints = sift.detect(gray2, None)
#keypoints, descriptors = sift.detectAndCompute(gray2, None)
img_sift = cv2.drawKeypoints(img,
                             keypoints,
                             flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

cv2.imshow('SIFT features', img_sift)
cv2.waitKey()

#SURF feature detector and descriptor
surf = cv2.SURF()

#Hessian threshold: higher values keep fewer (stronger) keypoints
surf.hessianThreshold = 15000
kp, ds = surf.detectAndCompute(gray2, None)
img_surf = cv2.drawKeypoints(img, kp, None, (0, 255, 0), 4)

cv2.imshow('SURF features', img_surf)
cv2.waitKey()

#FAST feature detector
fast = cv2.FastFeatureDetector()
keypoints = fast.detect(gray2, None)

#BRIEF feature descriptor
brief = cv2.DescriptorExtractor_create("BRIEF")
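#The snippet stops here; a likely next step (not in the original) would be to compute the
#BRIEF descriptors for the FAST keypoints:
#keypoints, descriptors = brief.compute(gray2, keypoints)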
import cv2
from numpy import *
# read image
im = cv2.imread('../pcv_data/data/empire.jpg')

# down sample
im_lowres = cv2.pyrDown(im)

# convert to grayscale
gray = cv2.cvtColor(im_lowres, cv2.COLOR_BGR2GRAY)

# detect feature points
s = cv2.SURF()
mask = uint8(ones(gray.shape))
keypoints = s.detect(gray, mask)

# show image and points
vis = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)

for k in keypoints[::10]:
    cv2.circle(vis, (int(k.pt[0]), int(k.pt[1])), 2, (0, 255, 0), -1)
    cv2.circle(vis, (int(k.pt[0]), int(k.pt[1])), int(k.size), (0, 255, 0), 2)
cv2.imshow('local descriptors', vis)
cv2.imwrite('result_surf.jpg', vis)
cv2.waitKey()
from cv2 import SURF, imread

import cv2
import scipy.misc

img = imread('train-10/Suburb/image_0029.jpg', 0)
surf = SURF(400)
kp, des = surf.detectAndCompute(img, None)

im = cv2.imread('train-10/Suburb/image_0029.jpg')
cv2.imshow('original', im)

#s = cv2.SIFT() # SIFT
s = cv2.SURF()  # SURF
keypoints = s.detect(im)

for k in keypoints:
    cv2.circle(im, (int(k.pt[0]), int(k.pt[1])), 1, (0, 255, 0), -1)
    #cv2.circle(im,(int(k.pt[0]),int(k.pt[1])),int(k.size),(0,255,0),2)

cv2.imshow('SURF_features', im)
cv2.waitKey()
cv2.destroyAllWindows()
Exemple #30
0
                    len(matches[m]['flann']['matches']),
                    matches[m]['flann']['outliers'],
                    matches[m]['flann']['inliers'],
                    matches[m]['flann']['time'],
                ])


if __name__ == "__main__":

    if len(sys.argv) < 2:
        exit(0)

    path = sys.argv[1]
    kp_algorithims = [
        ('sift', cv2.SIFT()),
        ('surf', cv2.SURF()),
        ('fast', cv2.FastFeatureDetector()),
        ('orb', cv2.ORB()),
    ]

    des_algorithims = [
        ('sift', cv2.SIFT()),
        ('surf', cv2.SURF()),
        ('orb', cv2.ORB()),
        #('brief', cv2.DescriptorExtractor_create("BRIEF")),
    ]

    benchmark = HackberryBenchmark(path, kp_algorithims, des_algorithims,
                                   ['jpg'])

    img1 = cv2.imread(os.path.join(path, 'f-0.jpg'), 0)