def search_windows(img, windows, clf, scaler, color_space='RGB', 
                   spatial_size=(32, 32), hist_bins=32, 
                   hist_range=(0, 256), orient=9, 
                   pix_per_cell=8, cell_per_block=2, 
                   hog_channel=0, spatial_feat=True, 
                   hist_feat=True, hog_feat=True):
    """Classify every candidate window on *img* and return the positives.

    Each window is cropped from the image, resized to the 64x64 patch size
    the classifier was trained on, turned into a feature vector with
    ``single_img_features``, scaled with the training-time *scaler*, and
    fed to *clf*. Windows predicted as class 1 are kept.
    """
    hot_windows = []
    for window in windows:
        (x_start, y_start), (x_stop, y_stop) = window
        # Crop the region of interest and normalise it to the training
        # patch size (64x64).
        patch = cv2.resize(img[y_start:y_stop, x_start:x_stop], (64, 64))
        # Build the same feature vector the classifier saw during training.
        patch_features = single_img_features(
            patch, color_space=color_space,
            spatial_size=spatial_size, hist_bins=hist_bins,
            orient=orient, pix_per_cell=pix_per_cell,
            cell_per_block=cell_per_block,
            hog_channel=hog_channel, spatial_feat=spatial_feat,
            hist_feat=hist_feat, hog_feat=hog_feat)
        # Apply the training-time feature scaling before prediction.
        scaled = scaler.transform(np.array(patch_features).reshape(1, -1))
        if clf.predict(scaled) == 1:
            hot_windows.append(window)
    return hot_windows
    def test_02_trainingse_nont(self):
        """Run the trained classifier over every non-vehicle training image
        and report the fraction that is (correctly) not flagged as a vehicle.

        Each positive prediction is printed and counted; the final print is
        (total - positives) / total, i.e. the true-negative rate.
        """
        image_paths = glob.glob('train_imgs/non-vehicles/*/*.png')
        (clf, scaler) = class_train.load()
        positives = 0
        for path in image_paths:
            raw = cnst.img_read_f(path)
            patch = cv2.resize(raw, (64, 64))
            feats = single_img_features(patch,
                                        color_space=cnst.color_space,
                                        spatial_size=cnst.spatial_size,
                                        hist_bins=cnst.hist_bins,
                                        orient=cnst.orient,
                                        pix_per_cell=cnst.pix_per_cell,
                                        cell_per_block=cnst.cell_per_block,
                                        hog_channel=cnst.hog_channel,
                                        spatial_feat=cnst.spatial_feat,
                                        hist_feat=cnst.hist_feat,
                                        hog_feat=cnst.hog_feat)
            # Scale exactly as during training before predicting.
            scaled = scaler.transform(np.array(feats).reshape(1, -1))
            prediction = clf.predict(scaled)
            if (prediction > 0.0):
                print("Prediction " + str(prediction) + " for " + path)
                positives = positives + 1

        print((len(image_paths) - positives) / len(image_paths))
# Beispiel #3
# 0
def get_features_dataset(dataset,
                         color_space='RGB',
                         spatial_size=(32, 32),
                         hist_bins=32,
                         orient=9,
                         pix_per_cell=8,
                         cell_per_block=2,
                         hog_channel=2):
    """Extract a feature vector for every image file in *dataset*.

    :param dataset: iterable of image file paths readable by ``mpimg.imread``
    :param color_space: color space passed to ``single_img_features``
    :param spatial_size: spatial-binning size (width, height)
    :param hist_bins: number of color-histogram bins
    :param orient: number of HOG orientation bins
    :param pix_per_cell: HOG pixels per cell
    :param cell_per_block: HOG cells per block
    :param hog_channel: image channel used for HOG extraction
    :return: list with one feature vector per input image
    """
    features = []

    t0 = time.time()

    # Extract features from each image in the dataset and append.
    for file in dataset:
        img = mpimg.imread(file)
        features_img = single_img_features(img, color_space, spatial_size,
                                           hist_bins, orient, pix_per_cell,
                                           cell_per_block, hog_channel)
        features.append(features_img)

    t1 = time.time()
    # Bug fix: the original printed len(features_img) — the length of the
    # LAST image's feature vector — and raised NameError for an empty
    # dataset, since features_img was only bound inside the loop. Report
    # the number of extracted feature vectors instead, which is also
    # well-defined when the dataset is empty.
    print('Extracted %s features in %s seconds.' %
          (len(features), t1 - t0))

    return features
 def test_01_trainingset(self):
     """Load the trained classifier and print its prediction for a single
     known-vehicle training image, along with the extracted features."""
     image_paths = glob.glob('train_imgs/vehicles/GTI_Far/image0000.png')
     (clf, scaler) = class_train.load()
     for path in image_paths:
         raw = cnst.img_read_f(path)
         patch = cv2.resize(raw, (64, 64))
         feats = single_img_features(patch,
                                     color_space=cnst.color_space,
                                     spatial_size=cnst.spatial_size,
                                     hist_bins=cnst.hist_bins,
                                     orient=cnst.orient,
                                     pix_per_cell=cnst.pix_per_cell,
                                     cell_per_block=cnst.cell_per_block,
                                     hog_channel=cnst.hog_channel,
                                     spatial_feat=cnst.spatial_feat,
                                     hist_feat=cnst.hist_feat,
                                     hog_feat=cnst.hog_feat)
         # Dump the raw feature vector for manual inspection.
         print(feats.shape)
         print(feats)
         print(len(feats))
         # Scale exactly as during training before predicting.
         scaled = scaler.transform(np.array(feats).reshape(1, -1))
         prediction = clf.predict(scaled)
         print(str(prediction) + " : " + path)