Example #1
0
    def extract_single_img_features(img):
        """
        Combine spatial-bin, color-histogram and HOG (gradient-histogram)
        features for a single image.

        :param img: input image as an (H, W, C) array
        :return: list containing one 1-D concatenated feature vector
        """
        # Create a list to append feature vectors to
        features = []

        # apply color conversion if other than 'RGB'
        feature_image = Helper.change_cspace(img)

        # get hog features for either a specific channel or for all channels
        if config["hog_channel"] == 'ALL':
            hog_features = []
            channels = feature_image.shape[2]
            # get features for every channel
            for channel in range(channels):
                hog_features.append(
                    FeatureExtraction.get_hog_features(feature_image[:, :,
                                                                     channel],
                                                       feature_vec=True))
            # BUG FIX: ravel once, AFTER the loop. Raveling inside the loop
            # replaced the list with an ndarray, so .append() raised
            # AttributeError on the second channel.
            hog_features = np.ravel(hog_features)
        else:
            # get features for the single configured channel
            hog_features = FeatureExtraction.get_hog_features(
                feature_image[:, :, config["hog_channel"]], feature_vec=True)

        # Apply bin_spatial() to get spatial color features
        bin_features = FeatureExtraction.bin_spatial(feature_image,
                                                     config["spatial_size"])

        # Apply color_hist() to get color histogram features
        color_hist_features = FeatureExtraction.color_hist(
            feature_image, config["hist_bins"])

        # concatenate all 3 feature types into one 1-D vector
        feature = np.concatenate(
            (bin_features, color_hist_features, hog_features), axis=0)

        # Append the new feature vector to the features list
        features.append(feature)

        # Return list of feature vectors
        return features
    def get_bounding_boxes(img, classifier, x_start_stop, y_start_stop):
        """
        Slide a fixed-size window over the search region and return the
        bounding boxes of windows the classifier predicts as cars.

        :param img: full frame to search
        :param classifier: (svc, scaler) tuple — trained classifier and the
            feature scaler fitted on the training features
        :param x_start_stop: (x_start, x_stop) horizontal search bounds
        :param y_start_stop: (y_start, y_stop) vertical search bounds
        :return: list of bounding boxes (as produced by WindowSearch.get_box)
        """
        # get window parameters
        n_xsteps, n_ysteps, w = WindowSearch.get_window_params(img,
                                                               x_start_stop,
                                                               y_start_stop)
        n_blocks_per_window, ctrans_tosearch = w

        # get hog features once for the full (color-converted) search image
        hog1, hog2, hog3 = WindowSearch.get_frame_hog(ctrans_tosearch)

        svc, scaler = classifier
        # NOTE: only the start coordinates are used below; the stop values
        # are consumed by get_window_params above.
        x_start, _x_stop = x_start_stop
        y_start, _y_stop = y_start_stop
        bounding_boxes = []

        t_start = int(time.time())

        for xb in range(n_xsteps):
            for yb in range(n_ysteps):
                y_pos = yb * config["cells_per_step"]
                x_pos = xb * config["cells_per_step"]

                # Extract the pre-computed HOG sub-blocks for this patch
                hog_feat1 = hog1[y_pos:y_pos + n_blocks_per_window,
                                 x_pos:x_pos + n_blocks_per_window].ravel()
                hog_feat2 = hog2[y_pos:y_pos + n_blocks_per_window,
                                 x_pos:x_pos + n_blocks_per_window].ravel()
                hog_feat3 = hog3[y_pos:y_pos + n_blocks_per_window,
                                 x_pos:x_pos + n_blocks_per_window].ravel()

                # top-left pixel coordinates of this window
                x_left = x_pos * config["pix_per_cell"]
                # BUG FIX: this previously clobbered the unpacked `y_stop`
                # search bound on every iteration; use a distinct name for
                # the window's top pixel offset (same value, same flow).
                y_top = y_pos * config["pix_per_cell"]

                # Extract the image patch
                sub_sample_img = cv.resize(
                    ctrans_tosearch[y_top:y_top + config["window_size"],
                                    x_left:x_left + config["window_size"]],
                    (config["window_size"], config["window_size"])
                )

                # Get color and gradient features for this image patch
                hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
                spatial_features = FeatureExtraction.bin_spatial(
                    sub_sample_img, size=config["spatial_size"])
                hist_features = FeatureExtraction.color_hist(
                    sub_sample_img, nbins=config["hist_bins"])

                # merge the three feature groups in training order
                features = np.hstack(
                    (spatial_features, hist_features, hog_features))

                # normalize with the scaler fitted at training time
                features = scaler.transform(np.array(features).reshape(1, -1))

                # predict the label for the features: 1 = car, 0 = not car
                predicted_labels = svc.predict(features)

                # record the bounding box for detected cars
                if predicted_labels == 1:
                    bounding_boxes.append(WindowSearch.get_box(x_start,
                                                               x_left,
                                                               y_start,
                                                               y_top))

        t_end = int(time.time())
        print("prediction time: {}".format(t_end - t_start))

        return bounding_boxes