Example #1
def haar_feature_fit(pos_dir, neg_dir):
    # Accumulate feature vectors and labels, then fit the classifier once.
    # (Fitting on one sample at a time retrains from scratch each call and
    # fails for a single-class batch.)
    X = []
    y = []
    classi = ensemble.AdaBoostClassifier(
        tree.DecisionTreeClassifier(max_depth=2),
        algorithm="SAMME.R",
        n_estimators=50,
        learning_rate=1.2)

    # Positive samples: label 1
    for root, dirs, files in os.walk(pos_dir):
        for i, file in enumerate(files):
            if i >= 50:
                break
            print(file)
            image = io.imread(os.path.join(root, file))
            img_gray = color.rgb2gray(image)
            img_union = transform.resize(img_gray, (190, 100))
            # feature_types[0] refers to a globally defined list of Haar feature types
            coord, ft = haar_like_feature_coord(100, 60, feature_types[0])
            # haar_like_feature expects an integral image
            feature = haar_like_feature(transform.integral_image(img_union),
                                        0, 0, 100, 60, ft, coord)
            X.append(feature)
            y.append(1)

    # Negative samples: label 0
    for root, dirs, files in os.walk(neg_dir):
        for i, file in enumerate(files):
            if i >= 50:
                break
            print(file)
            image = io.imread(os.path.join(root, file))
            img_gray = color.rgb2gray(image)
            coord, ft = haar_like_feature_coord(100, 60, feature_types[0])
            feature = haar_like_feature(transform.integral_image(img_gray),
                                        0, 0, 100, 60, ft, coord)
            X.append(feature)
            y.append(0)

    classi.fit(np.array(X), np.array(y))
    joblib.dump(classi, "D:/dataset/haarmodel.m")

    # Quick sanity check on a single image
    image = io.imread(
        "D:/dataset/ccpd_select1/242&422_86&414_80&364_235&372-0_0_16_32_11_32_26.jpg"
    )
    img_gray = color.rgb2gray(image)
    coord, ft = haar_like_feature_coord(100, 60, feature_types[0])
    feature = haar_like_feature(transform.integral_image(img_gray),
                                0, 0, 100, 60, ft, coord)
    res = classi.predict(np.array([feature]))
    print(res)
Example #2
    def find_best_haar_features(self, sample_patches, labels):
        feature_types = ['type-2-x', 'type-2-y']
        feature_coord, feature_type = haar_like_feature_coord(self.resized,
                                                              self.resized,
                                                              feature_types)

        data = []
        for i in range(sample_patches.shape[0]):
            features = extract_feature_image(sample_patches[i], feature_types,
                                             feature_coord=None)
            data.append(features)
        data = np.array(data)

        max_feat = min(100, data.shape[1])
        clf = RandomForestClassifier(n_estimators=1000, max_depth=None,
                                     max_features=max_feat, n_jobs=-1,
                                     random_state=13)
        clf.fit(data, labels)
        idx_sorted = np.argsort(clf.feature_importances_)[::-1]

        cdf_feature_importances = np.cumsum(
            clf.feature_importances_[idx_sorted[::-1]])
        cdf_feature_importances /= np.max(cdf_feature_importances)
        significant_feature = np.count_nonzero(
            cdf_feature_importances > self.threshold_haar)
        selected_feature_coord = feature_coord[idx_sorted[:significant_feature]]
        selected_feature_type = feature_type[idx_sorted[:significant_feature]]

        return selected_feature_coord, selected_feature_type
Example #3
def compute_haar_features(feature_types, images):

    # code based on https://scikit-image.org/docs/dev/auto_examples/
    # applications/plot_haar_extraction_selection_classification.html

    # random seed for repeatability
    np.random.seed(4)

    # Build a computation graph using Dask. This allows the use of multiple
    # CPU cores later during the actual computation
    print("\n building computational graph\n")
    X = delayed(extract_feature_image(img, feature_types) for img in images)

    # Compute the result
    t_start = time()
    print("\ncomputing features\n")
    X = np.array(X.compute(scheduler='threads'))
    time_full_feature_comp = time() - t_start

    print("\ncomputation took: ", time_full_feature_comp, "\n")
    
    # Extract all possible feature coords and feature types
    feature_coord, feature_type = haar_like_feature_coord(
        width=images.shape[2], height=images.shape[1],
        feature_type=feature_types)

    return X, feature_coord, feature_type
Example #4
def test_haar_like_feature_coord(feature_type, height, width, expected_coord):
    feat_coord, feat_type = haar_like_feature_coord(width, height,
                                                    feature_type)
    # convert the output to a full numpy array just for comparison
    feat_coord = np.array([hf for hf in feat_coord])
    assert_array_equal(feat_coord, expected_coord)
    assert np.all(feat_type == feature_type)
Example #5
def sample():
    images = [
        np.zeros((2, 2)),
        np.zeros((2, 2)),
        np.zeros((3, 3)),
        np.zeros((3, 3)),
        np.zeros((2, 2))
    ]

    feature_types = ['type-2-x', 'type-2-y', 'type-3-x', 'type-3-y', 'type-4']
    fig, axs = plt.subplots(3, 2)
    for ax, img, feat_t in zip(np.ravel(axs), images, feature_types):
        coord, _ = haar_like_feature_coord(img.shape[0], img.shape[1], feat_t)
        haar_feature = draw_haar_like_feature(img,
                                              0,
                                              0,
                                              img.shape[0],
                                              img.shape[1],
                                              coord,
                                              max_n_features=1,
                                              random_state=0)
        ax.imshow(haar_feature)
        ax.set_title(feat_t)
        ax.set_xticks([])
        ax.set_yticks([])

    fig.suptitle('The different Haar-like feature descriptors')
    plt.axis('off')
    plt.show()
def get_haar_coord():
    feature_types = ['type-2-x', 'type-2-y', 'type-3-x', 'type-3-y', 'type-4']

    # Extract all possible features
    list_feature_coords, list_feature_types = \
        haar_like_feature_coord(width=INPUT_WIDTH, height=INPUT_HEIGHT,
                               feature_type=feature_types)
    return list_feature_coords, list_feature_types
Example #7
def test_haar_like_feature_error():
    img = np.ones((5, 5), dtype=np.float32)
    img_ii = integral_image(img)

    feature_type = 'unknown_type'
    with pytest.raises(ValueError):
        haar_like_feature(img_ii, 0, 0, 5, 5, feature_type=feature_type)
    with pytest.raises(ValueError):
        haar_like_feature_coord(5, 5, feature_type=feature_type)
    with pytest.raises(ValueError):
        draw_haar_like_feature(img, 0, 0, 5, 5, feature_type=feature_type)

    feat_coord, feat_type = haar_like_feature_coord(5, 5, 'type-2-x')
    with pytest.raises(ValueError):
        haar_like_feature(img_ii,
                          0,
                          0,
                          5,
                          5,
                          feature_type=feat_type[:3],
                          feature_coord=feat_coord)
Example #8
def test_draw_haar_like_feature(max_n_features, nnz_values):
    img = np.zeros((5, 5), dtype=np.float32)
    coord, _ = haar_like_feature_coord(5, 5, 'type-4')
    image = draw_haar_like_feature(img,
                                   0,
                                   0,
                                   5,
                                   5,
                                   coord,
                                   max_n_features=max_n_features,
                                   random_state=0)
    assert image.shape == (5, 5, 3)
    assert np.count_nonzero(image) == nnz_values
Example #9
def test_haar_like_feature_precomputed(feature_type):
    img = np.ones((5, 5), dtype=np.int8)
    img_ii = integral_image(img)
    if isinstance(feature_type, list):
        # shuffle the feature types to check that the features are
        # output in the same order
        shuffle(feature_type)
        feat_coord, feat_type = zip(
            *
            [haar_like_feature_coord(5, 5, feat_t) for feat_t in feature_type])
        feat_coord = np.concatenate(feat_coord)
        feat_type = np.concatenate(feat_type)
    else:
        feat_coord, feat_type = haar_like_feature_coord(5, 5, feature_type)
    haar_feature_precomputed = haar_like_feature(img_ii,
                                                 0,
                                                 0,
                                                 5,
                                                 5,
                                                 feature_type=feat_type,
                                                 feature_coord=feat_coord)
    haar_feature = haar_like_feature(img_ii, 0, 0, 5, 5, feature_type)
    assert_array_equal(haar_feature_precomputed, haar_feature)
Example #10
def visualize_haar_f_kernel(img, f_type):
    coord, _ = haar_like_feature_coord(img.shape[0], img.shape[1], f_type)
    coord_array = np.asarray(coord)
    # OpenCV points are (x, y) while the image array is indexed (y, x)
    point_coord = [[(c2[1], c2[0]) for c1 in c for c2 in c1] for c in coord_array]
    for p in point_coord:
        img_v = np.ones((img.shape[0], img.shape[1])).astype('uint8')
        img_v = img_v * 127
        img_v = cv2.rectangle(img_v, p[0], p[1], 255, -1)
        img_v = cv2.rectangle(img_v, p[2], p[3], 0, -1)
        cv2.namedWindow('Image', cv2.WINDOW_NORMAL)
        cv2.imshow('Image', img_v)
        cv2.waitKey(500)
        cv2.destroyAllWindows()
Example #11
    def display(self, significant_features, image):
        """ Plot the most significant haar-like features on top of an image """
        feature_coord, _ = haar_like_feature_coord(width=32, height=64)

        fig, axes = plt.subplots(3, 2)
        for idx, ax in enumerate(axes.ravel()):
            image = draw_haar_like_feature(
                image, 0, 0, 32, 64,
                [feature_coord[significant_features[idx]]])
            ax.imshow(image)
            ax.set_xticks([])
            ax.set_yticks([])

        fig.suptitle('The most important features')
        plt.show()
Example #12
def haar_without_ada(train_harr, labels, feature_maps):
    coord = []
    for feature in feature_maps:
        feat_coord, _ = haar_like_feature_coord(20, 20, feature)
        coord.append(feat_coord)
    error = []
    indicator = np.ones((np.size(train_harr, 0)))
    for j in range(np.size(train_harr, 1)):
        threshold = np.mean(train_harr[:, j])
        predictions = np.ones(np.size(train_harr, 0))
        for k, feats in enumerate(train_harr[:, j]):
            if feats < threshold:
                predictions[k] = -1
        error_thresh = sum(indicator[predictions != labels])
        error.append(error_thresh)
    error = np.array(error)
    # Sort errors ascending; the first indices correspond to the smallest
    # errors (e.g. the 10 best features)
    indices = np.argsort(error)
    return coord, indices
Example #13
    def identify_selected_features(self, images, labels, size=(32, 64)):
        """ """
        # Compute the haar-like features for all of the different feature types
        X = delayed(self.compute(img) for img in images)
        X = np.array(X.compute(scheduler='processes'))
        X_train, X_test, y_train, y_test = train_test_split(X,
                                                            labels,
                                                            test_size=0.25)

        # Train a random forest classifier to find out which features are important for training
        clf = RandomForestClassifier(n_estimators=1000,
                                     max_depth=None,
                                     max_features=100,
                                     n_jobs=-1,
                                     random_state=0)
        clf.fit(X_train, y_train)
        significant_features = np.argsort(clf.feature_importances_)[::-1]

        cdf_feature_importances = np.cumsum(
            clf.feature_importances_[significant_features])
        cdf_feature_importances /= np.max(cdf_feature_importances)
        sig_feature_count = np.count_nonzero(cdf_feature_importances < 0.7)
        sig_feature_percent = round(
            sig_feature_count / len(cdf_feature_importances) * 100, 1)

        print(('{} features, or {}%, account for 70% of branch points in the '
               'random forest.').format(sig_feature_count, sig_feature_percent))

        # Extract all possible features to be able to select the most salient
        feature_coord, feature_type = haar_like_feature_coord(width=size[0],
                                                              height=size[1])

        # Store the most informative features
        self.selected_feature_coord = feature_coord[
            significant_features[:sig_feature_count]]
        self.selected_feature_type = feature_type[
            significant_features[:sig_feature_count]]
Example #14
def detection(img):
    width = img.shape[0]
    height = img.shape[1]
    print(height)
    ptr_w = 0
    ptr_h = 0
    classi = joblib.load("D:/dataset/haarmodel.m")

    while ptr_h < height:
        while ptr_w < width:
            part_img = img[ptr_w:ptr_w + 60, ptr_h:ptr_h + 100]

            img_gray = color.rgb2gray(part_img)
            io.imshow(img_gray)
            io.show()
            coord, ft = haar_like_feature_coord(100, 60, feature_types[0])
            # haar_like_feature expects an integral image
            feature = haar_like_feature(transform.integral_image(img_gray),
                                        0, 0, 100, 60, ft, coord)
            test = np.array([feature])
            res = classi.predict(test)
            if res == 1:
                print("PL detected")
            else:
                print("NOT detected" + str(ptr_w) + "_" + str(ptr_h))
            if (ptr_w + 90) == width:
                ptr_w = width + 1
            else:
                ptr_w += 70
                if (ptr_w + 70) > width:
                    ptr_w = width - 90
        ptr_w = 0
        if (ptr_h + 100) == height:
            ptr_h = height + 1
        else:
            ptr_h += 100
            if (ptr_h + 100) > height:
                print("end")
                ptr_h = height - 100
Example #15
def get_adaboost_haar_f(index,dataset,feature_type):
    f_coord,f_type = haar_like_feature_coord(dataset[0].shape[0],dataset[0].shape[1],feature_type)
    H_list = [np.asarray([get_adaboost_haar(data,ind,f_type,f_coord)[0] for data in dataset])\
              for ind in tqdm(index)]
    return np.asarray(H_list)
Example #16
def adaboost_haar_vis(face_data,hfs,f_type):
    coord,_ = haar_like_feature_coord(face_data[0].shape[0], face_data[0].shape[1],f_type)
    for i in range(10):
        visualize_haar_f(face_data[i],coord[hfs[i]])
Example #17
extract_feature_image(gray, feature_coord=cset)



image = draw_haar_like_feature(gray, 0, 0,
                               600,
                               600,
                               'type-2-x')


### experimenting with the HAAR like features
feature_types = ['type-2-x', 'type-2-y']
image = images[0]
image=(images[0])
feature_coord, _ = haar_like_feature_coord(12, 24, 'type-2-y')
image = draw_haar_like_feature(image, 0, 0,
                               images.shape[2],
                               images.shape[1],
                               feature_coord,
                               alpha=0.1)
plt.imshow(image)

?draw_haar_like_feature

?haar_like_feature_coord

import numpy as np
from skimage.feature import haar_like_feature_coord
from skimage.feature import draw_haar_like_feature
feature_coord, _ = haar_like_feature_coord(2, 2, 'type-3')
def featureCreate(img, fType):
    coord, _ = haar_like_feature_coord(img.shape[1], img.shape[0], fType)
    return coord
Example #19
    """Extract the haar feature for the current image"""
    ii = integral_image(img)
    return haar_like_feature(ii,
                             0,
                             0,
                             ii.shape[0],
                             ii.shape[1],
                             feature_type=feature_type,
                             feature_coord=feature_coord)


TOTAL = path + path2
print(path)
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
feat_coord, feat_type = haar_like_feature_coord(2, 2, feature_types)

features = draw_haar_like_feature(img, 2, 3, 39, 39, feat_coord)

#X = delayed(extract_feature_image(img, feature_types)
#for imgs in img)
#print(X)

#x= extract_feature_image(img,'type-4', feature_coord=None)

img2 = integral_image(img)
# use the integral image computed above
feature = haar_like_feature(img2, 0, 0, 7, 7, feature_types)
print(len(feature))
print(feature)

#img = cv2.imread('D:\Documents\OPENCV\DB_PLATES\carro (1).jpg')
Example #20
from skimage.feature import haar_like_feature
from skimage.feature import haar_like_feature_coord
from skimage.feature import draw_haar_like_feature

img = np.ones((6, 6), dtype=np.uint8)
print('IMAGE:')
print(img)

print('IMAGE DIMENSIONS')
print('{} * {}'.format(img.shape[0], img.shape[1]))

img_ii = integral_image(img)
print(img_ii)
coord = (np.array([[(0, 0), (0, 0)], [(0, 1), (0, 1)], [(0, 2), (0, 2)]]))
feature_coord, feature_type = haar_like_feature_coord(
    width=img.shape[1], height=img.shape[0], feature_type="type-3-x")
print('COORDINATES OF EACH FEATURE')
print(type(feature_coord))
print(feature_type)
print(len(feature_type))
print('VALUE OF IMAGE AFTER APPLYING THE FEATURES')
feature = haar_like_feature(img_ii,
                            0,
                            0,
                            img.shape[1],
                            img.shape[0],
                            feature_type[0],
                            feature_coord=feature_coord[0])
print(feature)
print(len(feature))
# the computation step
X = delayed(extract_feature_image(img, feature_types) for img in images)
# Compute the result using the "processes" dask backend
t_start = time()
X = np.array(X.compute(scheduler='processes'))
time_full_feature_comp = time() - t_start
y = np.array([1] * 100 + [0] * 100)
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    train_size=150,
                                                    random_state=0,
                                                    stratify=y)

# Extract all possible features to be able to select the most salient.
feature_coord, feature_type = \
        haar_like_feature_coord(width=images.shape[2], height=images.shape[1],
                                feature_type=feature_types)

###############################################################################
# A random forest classifier can be trained in order to select the most salient
# features, specifically for face classification. The idea is to check which
# features are the most often used by the ensemble of trees. By using only
# the most salient features in subsequent steps, we can dramatically speed up
# computation, while retaining accuracy.

# Train a random forest classifier and check performance
clf = RandomForestClassifier(n_estimators=1000,
                             max_depth=None,
                             max_features=100,
                             n_jobs=-1,
                             random_state=0)
t_start = time()
Example #22
###############################################################################
# The Haar-like feature descriptors come in five different types, as
# illustrated in the figure below. The value of a descriptor is the difference
# between the sum of the intensity values in the green rectangles and that in
# the red ones.

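# A minimal numeric sketch of that definition: for a 2x2 image, the 'type-2-x'
# feature spanning the whole window compares the two column sums (1 + 3 = 4
# versus 2 + 4 = 6), so a value of +/-2 should appear among the returned
# features (the sign depends on which half the library treats as green).
import numpy as np
from skimage.transform import integral_image
from skimage.feature import haar_like_feature

tiny = np.array([[1, 2],
                 [3, 4]], dtype=np.float64)
print(haar_like_feature(integral_image(tiny), 0, 0, 2, 2, 'type-2-x'))
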
images = [np.zeros((2, 2)), np.zeros((2, 2)),
          np.zeros((3, 3)), np.zeros((3, 3)),
          np.zeros((2, 2))]

feature_types = ['type-2-x', 'type-2-y',
                 'type-3-x', 'type-3-y',
                 'type-4']

fig, axs = plt.subplots(3, 2)
for ax, img, feat_t in zip(np.ravel(axs), images, feature_types):
    coord, _ = haar_like_feature_coord(img.shape[0], img.shape[1], feat_t)
    haar_feature = draw_haar_like_feature(img, 0, 0,
                                          img.shape[0],
                                          img.shape[1],
                                          coord,
                                          max_n_features=1,
                                          random_state=0)
    ax.imshow(haar_feature)
    ax.set_title(feat_t)
    ax.set_xticks([])
    ax.set_yticks([])

fig.suptitle('The different Haar-like feature descriptors')
plt.axis('off')
plt.show()
Example #23
from skimage.transform import integral_image
from skimage.feature import haar_like_feature
from skimage.feature import haar_like_feature_coord
from skimage.feature import draw_haar_like_feature


#### get the images from example dataset face recognition

images = lfw_subset()
### experimenting with the HAAR like features
feature_types = ['type-2-x', 'type-2-y']
face2rec = images[0]
plt.imshow(face2rec)
ii = integral_image(face2rec)
plt.imshow(ii)
feature_coord, _ = haar_like_feature_coord(3, 3)#, 'type-3-y')
haar_feat = draw_haar_like_feature(ii, 0, 0,
                               images.shape[2],
                               images.shape[1],
                               feature_coord,
                               alpha=0.1)

plt.imshow(face2rec)
plt.imshow(haar_feat)
plt.show()
?draw_haar_like_feature
images.shape
#?haar_like_feature_coord
#rgb2gray(img)
?haar_like_feature_coord
feature_coord
Example #24
    """Extract the haar feature for the current image"""
    ii = integral_image(img)
    return haar_like_feature(ii,
                             0,
                             0,
                             ii.shape[0],
                             ii.shape[1],
                             feature_type=feature_type,
                             feature_coord=feature_coord)


with open('glasses_model.pkl', 'rb') as file:
    glasses_haar, idx = pickle.load(file)
feature_types = ['type-2-x', 'type-2-y']
feature_coord, feature_type = \
        haar_like_feature_coord(width=32, height=32,
                                feature_type=feature_types)
selected_feature_coord = feature_coord[idx]
selected_feature_type = feature_type[idx]

# Import matchlab icon
image = cv2.imread('match_lab_logo.png', cv2.IMREAD_UNCHANGED)
mask = cv2.cvtColor(image[:, :, -1], cv2.COLOR_GRAY2BGR)
overlay = cv2.cvtColor(image, cv2.COLOR_BGRA2BGR) * (mask / 255)

# Turn on camera
videoCapture = cv2.VideoCapture(0)
frameHeight = int(videoCapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
frameWidth = int(videoCapture.get(cv2.CAP_PROP_FRAME_WIDTH))
minSize = min(frameHeight, frameWidth)

# Preallocate video frame and ethnicity/gender queues
def get_haar_coordinates(img):
    coord, feature_type = haar_like_feature_coord(img.shape[1], img.shape[0],
                                                  feature_types)
    return coord, feature_type
# -*- coding: utf-8 -*-
"""
Created on Mon Jul  8 15:32:47 2019

@author: marijnhazelbag
"""



images = lfw_subset()
### experimenting with the HAAR like features
feature_types = ['type-2-x', 'type-2-y']
image = images[0]
image=(images[0])
feature_coord, _ = haar_like_feature_coord(12, 24, 'type-2-y')
image = draw_haar_like_feature(image, 0, 0,
                               images.shape[2],
                               images.shape[1],
                               feature_coord,
                               alpha=0.1)
plt.imshow(image)

?draw_haar_like_feature

?haar_like_feature_coord





Example #27
    list([[(13, 13), (13, 27)], [(27, 13), (27, 27)]]),
    list([[(13, 27), (13, 27)], [(27, 27), (27, 27)]])
],
                      dtype=object)

feat_type = np.array([
    'type-2-x', 'type-2-x', 'type-2-x', 'type-2-x', 'type-2-x', 'type-2-x',
    'type-2-x', 'type-2-x', 'type-2-x', 'type-2-x', 'type-2-y', 'type-2-y',
    'type-2-y', 'type-2-y', 'type-2-y', 'type-2-y', 'type-2-y', 'type-2-y',
    'type-2-y', 'type-2-y'
],
                     dtype=object)

# HAAR FILTER : version 2
radius = 30
feat_coord, feat_type = feature.haar_like_feature_coord(
    28, 28, ['type-2-x', 'type-2-y'])
#reducing the number of filters
i = 0
while i < len(feat_coord):
    if (feat_coord[i][0][1][0] - feat_coord[i][0][0][0])**2 + (
            feat_coord[i][0][1][1] - feat_coord[i][0][0][1])**2 < radius:
        feat_coord = np.delete(feat_coord, i)
        feat_type = np.delete(feat_type, i)
    else:
        i += 1
# one over 4
feat_coord = feat_coord[::4]
feat_type = feat_type[::4]
print('features', feat_coord.shape)

first = True
import numpy as np
from skimage import feature
from skimage.color import rgb2gray
from skimage.transform import integral_image
from .utils import compress_image

# Transform a [[n_images, h, l, 3]] shaped array with h, l constant
# into a [[n_images, k]] shaped array where k is a constant

DEFAULT_SAMPLE_SIZE = (40, 25)
DEFAULT_HAAR_FEATURE = 'type-3-x'
ft_set = feature.haar_like_feature_coord(*DEFAULT_SAMPLE_SIZE[::-1],
                                         feature_type=DEFAULT_HAAR_FEATURE)

BEST_HAAR_INDEXES_PATH = 'haar_best_indexes.txt'
haar_indexes = np.loadtxt(BEST_HAAR_INDEXES_PATH, dtype=int)
DEFAULT_HAAR_FEATURE_SET = ft_set[0][haar_indexes], ft_set[1][haar_indexes]
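

# A hypothetical haar() counterpart to hog() below, sketched under the module's
# own conventions (compress_image, DEFAULT_HAAR_FEATURE_SET): downsample each
# image, convert it to gray, and compute only the preselected Haar-like
# features, yielding the [n_images, k] array described in the comment above.
def haar(images, sample_size=DEFAULT_SAMPLE_SIZE,
         feature_set=DEFAULT_HAAR_FEATURE_SET):
    """Downsample each image and compute the selected Haar-like features."""
    coords, types = feature_set
    vectors = []
    for img in images:
        small = compress_image(img, sample_size)
        ii = integral_image(rgb2gray(small))
        vectors.append(feature.haar_like_feature(
            ii, 0, 0, ii.shape[1], ii.shape[0],
            feature_type=types, feature_coord=coords))
    return np.asarray(vectors)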


def hog(images, **kwargs):
    """Downsample and compute hog for each image"""
    # Get params
    kwargs = kwargs.copy()
    block_norm = kwargs.pop('block_norm', 'L2')
    sample_size = kwargs.pop('sample_size', DEFAULT_SAMPLE_SIZE)

    first = feature.hog(compress_image(images[0], sample_size),
                        block_norm=block_norm,
                        **kwargs)
    vectors = np.empty((len(images), *first.shape))
Example #29
# The Haar-like feature descriptors come in five different types, as
# illustrated in the figure below. The value of a descriptor is the difference
# between the sum of the intensity values in the green rectangles and that in
# the red ones.

images = [
    np.zeros((2, 2)),
    np.zeros((2, 2)),
    np.zeros((3, 3)),
    np.zeros((3, 3)),
    np.zeros((2, 2))
]

feature_types = ['type-2-x', 'type-2-y', 'type-3-x', 'type-3-y', 'type-4']

fig, axs = plt.subplots(3, 2)
for ax, img, feat_t in zip(np.ravel(axs), images, feature_types):
    coord, _ = haar_like_feature_coord(img.shape[0], img.shape[1], feat_t)
    haar_feature = draw_haar_like_feature(img,
                                          0,
                                          0,
                                          img.shape[0],
                                          img.shape[1],
                                          coord,
                                          max_n_features=1,
                                          random_state=0)
    ax.imshow(haar_feature)
    ax.set_title(feat_t)
    ax.set_xticks([])
    ax.set_yticks([])

fig.suptitle('The different Haar-like feature descriptors')
plt.axis('off')
Example #30
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score

from skimage.data import lfw_subset
from skimage.transform import integral_image
from skimage.feature import haar_like_feature
from skimage.feature import haar_like_feature_coord
from skimage.feature import draw_haar_like_feature

#### get the images from example dataset face recognition

images = lfw_subset()
### experimenting with the HAAR like features
feature_types = ['type-2-x', 'type-2-y']
image = images[0]
image = (images[0])
feature_coord, _ = haar_like_feature_coord(12, 24, 'type-2-y')
image = draw_haar_like_feature(image,
                               0,
                               0,
                               images.shape[2],
                               images.shape[1],
                               feature_coord,
                               alpha=0.1)
plt.imshow(image)

#?draw_haar_like_feature

#?haar_like_feature_coord
y_train = np.array([+1] * len(tr_face_data) + [-1] * len(tr_non_face_data))

# In[ ]:

X_test = np.array([
    haar_like_feature(integral_image(te_data[i]),
                      width=face_dim[0],
                      height=face_dim[1],
                      r=0,
                      c=0) for i in range(len(te_data))
])
y_test = np.array([+1] * len(te_face_data) + [-1] * len(te_non_face_data))

# In[ ]:

feature_coord, feature_type = haar_like_feature_coord(width=face_dim[0],
                                                      height=face_dim[1])

# In[ ]:

feature_coord.shape, X_train.shape, y_train.shape, X_test.shape, y_test.shape

# # Plotting HAAR Features

# In[ ]:


def plot_haar_feature(best_feature_coordinates, name='ada_boost'):
    for idx, feature_coordinate in enumerate(best_feature_coordinates):
        image = cv2.normalize(tr_data[0],
                              None,
                              alpha=0,
# Build a computation graph using dask. This allows using multiple CPUs for
# the computation step
X = delayed(extract_feature_image(img, feature_types)
            for img in images)
# Compute the result using the "processes" dask backend
t_start = time()
X = np.array(X.compute(scheduler='processes'))
time_full_feature_comp = time() - t_start
y = np.array([1] * 100 + [0] * 100)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=150,
                                                    random_state=0,
                                                    stratify=y)

# Extract all possible features to be able to select the most salient.
feature_coord, feature_type = \
        haar_like_feature_coord(width=images.shape[2], height=images.shape[1],
                                feature_type=feature_types)

###############################################################################
# A random forest classifier can be trained in order to select the most salient
# features, specifically for face classification. The idea is to check which
# features are the most often used by the ensemble of trees. By using only
# the most salient features in subsequent steps, we can dramatically speed up
# computation, while retaining accuracy.

# Train a random forest classifier and check performance
clf = RandomForestClassifier(n_estimators=1000, max_depth=None,
                             max_features=100, n_jobs=-1, random_state=0)
t_start = time()
clf.fit(X_train, y_train)
time_full_train = time() - t_start
auc_full_features = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
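
###############################################################################
# A sketch of the selection step that the comment above motivates, assuming
# the names from this snippet (the fitted clf, plus feature_coord and
# feature_type from haar_like_feature_coord): keep the most important features
# until they account for roughly 70% of the total importance, as in the
# scikit-image gallery example.
idx_sorted = np.argsort(clf.feature_importances_)[::-1]
cdf_feature_importances = np.cumsum(clf.feature_importances_[idx_sorted])
cdf_feature_importances /= cdf_feature_importances[-1]
sig_feature_count = np.count_nonzero(cdf_feature_importances < 0.7)

selected_feature_coord = feature_coord[idx_sorted[:sig_feature_count]]
selected_feature_type = feature_type[idx_sorted[:sig_feature_count]]

# Recomputing features restricted to this subset is much cheaper, e.g. per image:
# haar_like_feature(integral_image(img), 0, 0, images.shape[2], images.shape[1],
#                   feature_type=selected_feature_type,
#                   feature_coord=selected_feature_coord)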