Example #1
def overlapsEye(tl, eye_centers, eye_shape):
    """ 
    Return True if the window whose top-left corner is tl may overlap one
    of the eye patches centered at eye_centers. Used to reject candidate
    negative windows that might actually contain an eye.
    """
    for ctr in eye_centers:
        eye_tl = bf.center2tl(ctr, eye_shape)
        # Conservative test: only accept the window as non-overlapping if it
        # is far from the eye patch in both the row and column directions.
        if not (((tl[0] < eye_tl[0]-eye_shape[0]) or 
                 (tl[0] > eye_tl[0]+eye_shape[0])) and
                ((tl[1] < eye_tl[1]-eye_shape[1]) or 
                 (tl[1] > eye_tl[1]+eye_shape[1]))):
            return True
    return False
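
The helper bf.center2tl is not included in this listing. A minimal sketch of what it presumably does, assuming centers are (row, col) tuples and shapes are (rows, cols) tuples:

# Hypothetical sketch -- not part of the original bf module.
def center2tl(ctr, shape):
    # Map a patch center (row, col) to the patch's top-left corner.
    return (int(ctr[0] - shape[0] // 2), int(ctr[1] - shape[1] // 2))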
Example #2
def createSVM(training, eye_centers, eye_shape):
    """ 
    Create SVM model for eye detection. Inputs are as follows:
    * training -- old image used for generating an svm model
    * eyes_centers -- list of eye_centers used for generating an svm
    * eye_shape -- shape of eye patch
    """

    print "Building SVM classifier..."
    training_gray = bf.rgb2gray(training)
    eyes = []

    for ctr in eye_centers:
        eye_gray = extractTL(training_gray, 
                             bf.center2tl(ctr, eye_shape), eye_shape)
        eyes.append(eye_gray)

    # negative exemplars from rest of image
    print "Constructing negative exemplars..."
    negs = []
    num_negs = 0
    while num_negs < 999:
        tl = (np.random.randint(0, training_gray.shape[0]), 
              np.random.randint(0, training_gray.shape[1]))
        if (isValid(training_gray, tl, eye_shape) and not
            overlapsEye(tl, eye_centers, eye_shape)):
            num_negs += 1
            negs.append(extractTL(training_gray, tl, eye_shape))

    # create more positive exemplars by applying random small 3D rotations
    print "Constructing positive exemplars..."
    num_eyes = len(eyes)
    patches = deque([eye2patch(training_gray, 
                               bf.center2tl(ctr, eye_shape), 
                               eye_shape) for ctr in eye_centers])
                    
    while num_eyes < 999:
        patch = patches.popleft()
        jittered = jitter(patch, eye_shape)
        patches.append(patch)
        new_eye = patch2eye(jittered, eye_shape)
        eyes.append(new_eye)
        num_eyes += 1

        # change lighting conditions
        eyes.append(bf.adjustExposure(new_eye, 0.5))
        eyes.append(bf.adjustExposure(new_eye, 1.5))
        num_eyes += 2

    # compute HOG for eyes and negs
    eyes_hog = []
    for eye in eyes:
        eyes_hog.append(bf.getHog(eye))

    negs_hog = []
    for neg in negs:
        negs_hog.append(bf.getHog(neg))

    # set up training dataset (eyes = -1, negs = +1)
    training_set = np.vstack((negs_hog, eyes_hog))

    training_labels = np.ones(num_eyes + num_negs)
    training_labels[num_negs:] = -1
    
    scaler = preprocessing.StandardScaler().fit(training_set)
    training_set = scaler.transform(training_set)

    # train SVM
    print "Training SVM..."
    weights = {-1 : 1.0, 1 : 1.0}
    svm = SVM.SVC(C=1.0, gamma=0.01, kernel="rbf", class_weight=weights)
    svm.fit(training_set, training_labels)

    return svm, scaler
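
A hedged usage sketch for the returned model and scaler: score a single grayscale window of size eye_shape with the same HOG features and scaling used during training (bf.getHog is taken from the example above; the function name scoreWindow is made up). Negative decision values correspond to the eye class (label -1), consistent with Example #3 treating score <= 0 as a match.

def scoreWindow(window_gray, svm, scaler):
    # Compute the HOG descriptor, scale it exactly as the training set was
    # scaled, and return the SVM decision value (negative => likely an eye).
    feat = bf.getHog(window_gray).reshape(1, -1)
    feat = scaler.transform(feat)
    return svm.decision_function(feat)[0]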
Example #3
def searchForEyesSVM(gray, svm, scaler, eye_shape, locs):
    """ 
    Explore image on the cell level, reducing HOG calculations.
    Inputs are as follows (besides the obvious)
    * svm -- sklearn svm model; may be provided if it exists
    * scaler -- sklearn preprocessing scaler
    * locs -- list of approximate centers of eyes
    * eye_shape -- size of eye template in pixels (rows, columns)
    """

    tracker = MatchTracker()
    pq = PriorityQueue()

    eye_cells = (eye_shape[0] // 8, eye_shape[1] // 8)
    hog_computed = np.zeros((gray.shape[0] // 8, gray.shape[1] // 8),
                            dtype=bool)

    # step size (in cells) for the optional blind grid search below
    blind_skip = 3

    # adjust locs
    locs[0] = (int(locs[0][0]), int(locs[0][1]))
    locs[1] = (int(locs[1][0]), int(locs[1][1]))

    print locs

    # only compute HOG on subset of image at first
    min_x = min(bf.center2tl(locs[0], eye_shape)[1], 
                bf.center2tl(locs[1], eye_shape)[1])
    max_x = max(bf.center2tl(locs[0], eye_shape)[1], 
                bf.center2tl(locs[1], eye_shape)[1])
    min_y = min(bf.center2tl(locs[0], eye_shape)[0], 
                bf.center2tl(locs[1], eye_shape)[0])
    max_y = max(bf.center2tl(locs[0], eye_shape)[0], 
                bf.center2tl(locs[1], eye_shape)[0])
    
    tl = (min_y - 4*eye_shape[0], min_x - 4*eye_shape[1])
    br = (max_y + 4*eye_shape[0], max_x + 4*eye_shape[1])

    tl_cell = bf.px2cell(tl)
    br_cell = bf.px2cell(br)

    tl = bf.cell2px(tl_cell)
    br = bf.cell2px(br_cell)

    indices = np.index_exp[tl_cell[0]:br_cell[0], tl_cell[1]:br_cell[1], :]
    indices_computed = np.index_exp[tl_cell[0]:br_cell[0], tl_cell[1]:br_cell[1]]

    hog = np.empty((gray.shape[0] // 8, gray.shape[1] // 8, 9), 
                   dtype=float)
    hog[indices] = bf.getHog(gray[tl[0]:br[0], tl[1]:br[1]], 
                             normalize=False, flatten=False)
    hog_computed[indices_computed] = True

    # create visited array
    visited = np.zeros((hog.shape[0]-eye_cells[0]+1,
                        hog.shape[1]-eye_cells[1]+1), dtype=bool)
 
    # insert provided locations and begin exploration around each one
    for loc in locs:
        tl = bf.center2tl(loc, eye_shape)
        tl = bf.px2cell(tl)

        # only proceed if valid
        if not isValid(hog, tl, eye_cells):
            continue

        # handle this point
        visited[tl[0], tl[1]] = True
        score = testWindow(hog, svm, scaler, eye_cells, tl)[0]
        pq.put_nowait((score, tl))

        if score <= 0:
            tracker.insert(score, tl)

    # search
    greedySearch(hog, hog_computed, svm, scaler, 
                 eye_cells, visited, tracker, pq)
    if tracker.isDone():
        tracker.printClusterScores()
        clusters, scores = tracker.getBigClusters()
        centers = cellTLs2ctrs(clusters, eye_shape)
        return centers, scores

    # # if needed, repeat above search technique, but with broader scope
    # print "Searching blindly."

    # hog = bf.getHog(gray, normalize=False, flatten=False)
    # hog_computed[:, :] = True

    # for i in range(20, visited.shape[0]-20, blind_skip):
    #     for j in range(20, visited.shape[1]-20, blind_skip):
    #         test = (i, j)

    #         # only proceed if valid and not visited
    #         if (not isValid(hog, test, eye_cells)) or visited[i, j]:
    #             continue

    #         # handle this point
    #         visited[i, j] = True
    #         score = testWindow(hog, svm, scaler, eye_cells, test)[0]
    #         pq.put_nowait((score, test))

    #         if score <= 0:
    #             tracker.insert(score, test)

    # greedySearch(hog, hog_computed, svm, scaler, 
    #              eye_cells, visited, tracker, pq) 
    # if tracker.isDone():
    #     tracker.printClusterScores()
    #     clusters, scores = tracker.getBigClusters()
    #     centers = cellTLs2ctrs(clusters, eye_shape)
    #     return centers, scores

    print "Did not find two good matches."
    clusters, scores = tracker.getTwoBestClusters()
    if len(clusters) == 2:
        centers = cellTLs2ctrs(clusters, eye_shape)
        return centers, scores
    else:
        return locs, [-0.1, -0.1]
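
A hedged end-to-end sketch tying Examples #2 and #3 together. The file names, eye locations, and patch size are assumptions, and skimage.io.imread is just one way to obtain an RGB array; the code above divides by an 8-pixel HOG cell size, so eye_shape is chosen as a multiple of 8 here.

from skimage.io import imread

training = imread("training_frame.png")   # assumed file name
eye_centers = [(120, 210), (120, 300)]    # approximate (row, col) eye centers
eye_shape = (24, 48)                      # eye patch size in pixels (rows, cols)

svm, scaler = createSVM(training, eye_centers, eye_shape)

# Track the eyes in a new frame, starting from the previous centers.
frame_gray = bf.rgb2gray(imread("next_frame.png"))
centers, scores = searchForEyesSVM(frame_gray, svm, scaler, eye_shape,
                                   list(eye_centers))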