Example #1
def search_windows(img,
                   windows,
                   clf,
                   scaler,
                   color_space='RGB',
                   patch_size=(64, 64),
                   feature_extractor_params=None):
    if feature_extractor_params is None:
        feature_extractor_params = {}
    if color_space != 'RGB':
        img = convert_color(img, color_space)

    # 1) Create an empty list to receive positive detection windows
    on_windows = []
    # 2) Iterate over all windows in the list
    for window in windows:
        # 3) Extract the test window from original image
        test_img = cv2.resize(
            img[window[0][1]:window[1][1], window[0][0]:window[1][0]],
            patch_size)
        # 4) Extract features for that window using extract_features()
        features = extract_features(test_img, **feature_extractor_params)
        # 5) Scale extracted features to be fed to classifier
        test_features = scaler.transform(np.array(features).reshape(1, -1))
        # 6) Predict using your classifier
        prediction = clf.predict(test_features)
        # 7) If positive (prediction == 1) then save the window
        if prediction == 1:
            on_windows.append(window)
    # 8) Return windows for positive detections
    return on_windows
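
A hypothetical call, assuming windows come from a slide_window()-style helper and clf/scaler were fitted beforehand; the feature_extractor_params keys must match whatever extract_features() accepts:

hot_windows = search_windows(image, windows, clf, scaler,
                             color_space='YCrCb',
                             patch_size=(64, 64),
                             feature_extractor_params={})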
Example #2
def extract_features_imgs(imgs, cspace='RGB', **params):
    """Extract features from a list of images
    Useful for extracting features from training and test images

    :param imgs:
    :param bin_params: parameters for spatial binning
    :param color_params: parameters for color histogram binning
    :param hog_params: parameters for hog feature extractor
    :return:
    """
    # Create a list to append feature vectors to
    features = []
    # Iterate through the list of images
    # Read in each one by one
    # apply color conversion if other than 'RGB'
    # Apply bin_spatial() to get spatial color features
    # Apply color_hist() to get color histogram features
    # Get HOG features
    # Append the new feature vector to the features list
    # Return list of feature vectors
    for image in imgs:
        if isinstance(image, str):
            image = imread(image)

        if cspace != 'RGB':
            image = convert_color(image, cspace)

        features.append(extract_features(image, **params))

    return features
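
A minimal training sketch built on this helper; car_files and notcar_files are hypothetical path lists, and extract_features() defaults are assumed:

import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC

car_feats = extract_features_imgs(car_files, cspace='YCrCb')
notcar_feats = extract_features_imgs(notcar_files, cspace='YCrCb')

X = np.vstack((car_feats, notcar_feats)).astype(np.float64)
y = np.hstack((np.ones(len(car_feats)), np.zeros(len(notcar_feats))))

scaler = StandardScaler().fit(X)  # fit the scaler on training data only
clf = LinearSVC().fit(scaler.transform(X), y)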
Example #3
def find_cars(img, ystart, ystop, scale, clf, X_scaler, cspace, spatial_size, hist_bins,
              orient, pix_per_cell, cell_per_block, hog_channel):
    '''Detect vehicles and return containing boxes'''
    img = img.astype(np.float32)/255
    img_tosearch = img[ystart:ystop, :, :]
    ctrans_tosearch = convert_color(img_tosearch, cspace=cspace)
    if scale != 1:
        imshape = ctrans_tosearch.shape
        ctrans_tosearch = cv2.resize(ctrans_tosearch, (int(imshape[1]/scale), int(imshape[0]/scale)))
    ch1 = ctrans_tosearch[:, :, 0]
    ch2 = ctrans_tosearch[:, :, 1]
    ch3 = ctrans_tosearch[:, :, 2]
    # Define blocks and steps as above
    nxblocks = (ch1.shape[1] // pix_per_cell) - cell_per_block + 1
    nyblocks = (ch1.shape[0] // pix_per_cell) - cell_per_block + 1
    # 64 was the original sampling rate, with 8 cells and 8 pix per cell
    window = 64
    nblocks_per_window = (window // pix_per_cell) - cell_per_block + 1
    cells_per_step = 2  # Instead of overlap, define how many cells to step
    nxsteps = (nxblocks - nblocks_per_window) // cells_per_step + 1
    nysteps = (nyblocks - nblocks_per_window) // cells_per_step + 1
    # Compute individual channel HOG features for the entire image
    if hog_channel == 'ALL':
        hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
        hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
        hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)
    else:
        channels = [ch1, ch2, ch3]
        hog = get_hog_features(channels[hog_channel], orient, pix_per_cell, cell_per_block, feature_vec=False)
    boxes = []
    for xb in range(nxsteps):
        for yb in range(nysteps):
            ypos = yb*cells_per_step
            xpos = xb*cells_per_step
            # Extract HOG for this patch
            if hog_channel == 'ALL':
                hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
                hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
                hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
                hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
            else:
                hog_features = hog[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
            xleft = xpos*pix_per_cell
            ytop = ypos*pix_per_cell
            # Extract the image patch
            subimg = cv2.resize(ctrans_tosearch[ytop:ytop+window, xleft:xleft+window], (64, 64))
            # Get color features
            spatial_features = bin_spatial(subimg, size=spatial_size)
            hist_features = color_hist(subimg, nbins=hist_bins)
            # Scale features and make a prediction
            test_features = X_scaler.transform(np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1))
            test_prediction = clf.predict(test_features)
            if test_prediction == 1:
                xbox_left = int(xleft*scale)
                ytop_draw = int(ytop*scale)
                win_draw = int(window*scale)
                boxes.append(((xbox_left, ytop_draw+ystart), (xbox_left+win_draw, ytop_draw+win_draw+ystart)))
    return boxes
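
The boxes returned here are usually accumulated into a heat map and thresholded to reject spurious single-window hits; a sketch of that step (assuming scipy is available, with img and boxes from the call above):

import numpy as np
from scipy.ndimage import label

heat = np.zeros(img.shape[:2], dtype=np.float32)
for (x1, y1), (x2, y2) in boxes:
    heat[y1:y2, x1:x2] += 1  # each detection votes for its pixels

heat[heat < 2] = 0  # drop pixels hit by fewer than 2 windows
labels, n_cars = label(heat)  # connected regions approximate individual vehicles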
Example #4
def extract_features_from_files(img_files, **kwargs):
    """
    extract features from a list of images
    :param img_files:
    :param kwargs:
    :return:
    """

    color_conversion = kwargs.get("color_conversion", 'RGB2YCrCb')
    orient = kwargs.get("orient", 9)
    pix_per_cell = kwargs.get("pix_per_cell", 8)
    cell_per_block = kwargs.get("cell_per_block", 2)
    hog_channel = kwargs.get("hog_channel", 0)
    spatial_size = kwargs.get("spatial_size", (32, 32))
    hist_bins = kwargs.get("hist_bins", 32)

    # Create a list to append feature vectors to
    features = []
    # Iterate through the list of images
    for file in img_files:
        image = mpimg.imread(file)
        # apply color conversion if other than 'RGB'
        feature_image = convert_color(image, conv=color_conversion)

        # Call get_hog_features() with visualize=False, feature_vec=True
        if hog_channel == 'ALL':
            hog_features = []
            for channel in range(feature_image.shape[2]):
                hog_features.append(
                    get_hog_features(feature_image[:, :, channel],
                                     orient,
                                     pix_per_cell,
                                     cell_per_block,
                                     visualize=False,
                                     feature_vec=True))
            hog_features = np.ravel(hog_features)
        else:
            hog_features = get_hog_features(feature_image[:, :, hog_channel],
                                            orient,
                                            pix_per_cell,
                                            cell_per_block,
                                            visualize=False,
                                            feature_vec=True)

        # Resize the full image to the 64x64 training patch size
        sub_img = cv2.resize(feature_image, (64, 64))

        # Get color features
        spatial_features = bin_spatial(sub_img, size=spatial_size)
        hist_features = color_hist(sub_img, nbins=hist_bins)
        image_features = np.hstack(
            (hog_features, spatial_features, hist_features))

        # Append the new feature vector to the features list
        features.append(image_features)
    return features
Example #5
def extract_feature(image,
                    color_space='RGB',
                    spatial_size=(32, 32),
                    hist_bins=32,
                    orient=9,
                    pix_per_cell=8,
                    cell_per_block=2,
                    hog_channel=0,
                    spatial_feat=True,
                    hist_feat=True,
                    hog_feat=True):
    """
    Extract features of an image
    """
    # Create a list to collect the different feature groups for later concatenation
    sub_features = []
    feature_image = tu.convert_color(image, color_space)

    # Extract spatial binning features
    if spatial_feat:
        spatial_features = fx.bin_spatial(feature_image, size=spatial_size)
        sub_features.append(spatial_features)

    # Extract color histogram features
    if hist_feat:
        hist_features = fx.color_hist(feature_image, nbins=hist_bins)
        sub_features.append(hist_features)

    # Extract HOG features
    if hog_feat:
        if hog_channel == 'ALL':
            hog_features = []
            for channel in range(feature_image.shape[2]):
                hog_features.append(
                    fx.get_hog_features(feature_image[:, :, channel],
                                        orient,
                                        pix_per_cell,
                                        cell_per_block,
                                        vis=False,
                                        feature_vec=True))
            hog_features = np.ravel(hog_features)
        else:
            hog_features = fx.get_hog_features(feature_image[:, :,
                                                             hog_channel],
                                               orient,
                                               pix_per_cell,
                                               cell_per_block,
                                               vis=False,
                                               feature_vec=True)
        sub_features.append(hog_features)
    return np.concatenate(sub_features)
Example #6
def format_draw(card: Dict, who: int, order: int) -> Optional[str]:
    """ Json message server sends when card is dealt """
    def convert_rank(rank):
        if rank is None:
            return -1
        elif rank > -1:
            return rank + 1
        else:
            return rank

    suit = utils.convert_color(card['color'])
    rank = convert_rank(card['rank'])
    who = str(who)
    order = str(order)

    return '{{"type":"draw","who":{0},"rank":{1},"suit":{2},"order":{3}}}'.format(
        who, rank, suit, order)
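
Hand-rolled JSON breaks as soon as a field needs quoting or escaping; a sketch of the same message built with json.dumps, with the inner convert_rank helper assumed hoisted to module scope:

import json

def format_draw_v2(card, who, order):
    # Field order and compact separators match the original format string
    return json.dumps({
        "type": "draw",
        "who": who,
        "rank": convert_rank(card['rank']),
        "suit": utils.convert_color(card['color']),
        "order": order,
    }, separators=(',', ':'))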
Example #7
def transform(self, X):
    print(X.shape)
    params = self.__construct_params(**self.get_params(deep=False))
    feature_list = []
    for idx in range(X.shape[0]):
        im = np.squeeze(X[idx])
        if params['cspace'] != 'RGB':
            im = convert_color(im, params['cspace'])
        feature_list.append(self.extract_feature(im, **params))
    print('transform done...scaling...')
    feature_vec = np.vstack(feature_list).astype(np.float64)
    # Fit a per-column scaler. Caution: fitting inside transform() scales each
    # batch against its own statistics; for consistent train/test scaling, fit
    # the scaler once on training data and reuse it.
    X_scaler = StandardScaler().fit(feature_vec)
    # Apply the scaler to the stacked feature vectors
    scaled_X = X_scaler.transform(feature_vec)
    print('scaling done ', scaled_X.shape)
    return scaled_X
Example #8
    def extract_image_features(self, img):
        """
        Extract features from single image
        """
        features = []
        cvt_img = convert_color(img, self.P.color_space)

        spatial_features = get_spatial_features(cvt_img, size=self.P.spatial_size)
        features.append(spatial_features)

        color_features = get_color_features(cvt_img, size=self.P.window_size,
                                            nbins=self.P.color_nbins)
        features.append(color_features)

        if self.P.window_size != (cvt_img.shape[0], cvt_img.shape[1]):
            cvt_img = cv2.resize(cvt_img, self.P.window_size)
        hog_features = get_hog_features(cvt_img, orient=self.P.orient,
                                        pix_per_cell=self.P.pix_per_cell,
                                        cell_per_block=self.P.cell_per_block)
        features.append(hog_features)
        return np.concatenate(features)
Example #9
def fill(self, color, rect=None):
    color = utils.convert_color(color)
    self._pixels.fill(color, rect)
Example #10
def search_windows_v2(img,
                      windows,
                      clf,
                      scaler,
                      color_space='RGB',
                      spatial_size=(32, 32),
                      hist_bins=32,
                      hist_range=(0, 256),
                      orient=9,
                      pix_per_cell=8,
                      cell_per_block=2,
                      hog_channel=0,
                      spatial_feat=True,
                      hist_feat=True,
                      hog_feat=True,
                      y_start_stop=[None, None],
                      scale=1):
    draw_img = np.copy(img)
    img = img.astype(np.float32) / 255
    # Default the y range to the full image height when not provided
    ystart = 0 if y_start_stop[0] is None else y_start_stop[0]
    ystop = img.shape[0] if y_start_stop[1] is None else y_start_stop[1]
    img_tosearch = img[ystart:ystop, :, :]
    ctrans_tosearch = utils.convert_color(img_tosearch, conv='RGB2YCrCb')
    if scale != 1:
        imshape = ctrans_tosearch.shape
        ctrans_tosearch = cv2.resize(
            ctrans_tosearch,
            (int(imshape[1] / scale), int(imshape[0] / scale)))

    ch1 = ctrans_tosearch[:, :, 0]
    ch2 = ctrans_tosearch[:, :, 1]
    ch3 = ctrans_tosearch[:, :, 2]
    # Define blocks and steps as above
    nxblocks = (ch1.shape[1] // pix_per_cell) - cell_per_block + 1
    nyblocks = (ch1.shape[0] // pix_per_cell) - cell_per_block + 1
    nfeat_per_block = orient * cell_per_block**2

    # 64 was the original sampling rate, with 8 cells and 8 pix per cell
    window = 64
    nblocks_per_window = (window // pix_per_cell) - cell_per_block + 1
    cells_per_step = 2  # Instead of overlap, define how many cells to step
    nxsteps = (nxblocks - nblocks_per_window) // cells_per_step
    nysteps = (nyblocks - nblocks_per_window) // cells_per_step

    # Compute individual channel HOG features for the entire image
    hog1 = utils.get_hog_features(ch1,
                                  orient,
                                  pix_per_cell,
                                  cell_per_block,
                                  feature_vec=False)
    hog2 = utils.get_hog_features(ch2,
                                  orient,
                                  pix_per_cell,
                                  cell_per_block,
                                  feature_vec=False)
    hog3 = utils.get_hog_features(ch3,
                                  orient,
                                  pix_per_cell,
                                  cell_per_block,
                                  feature_vec=False)

    for xb in range(nxsteps):
        for yb in range(nysteps):
            ypos = yb * cells_per_step
            xpos = xb * cells_per_step
            # Extract HOG for this patch
            hog_feat1 = hog1[ypos:ypos + nblocks_per_window,
                             xpos:xpos + nblocks_per_window].ravel()
            hog_feat2 = hog2[ypos:ypos + nblocks_per_window,
                             xpos:xpos + nblocks_per_window].ravel()
            hog_feat3 = hog3[ypos:ypos + nblocks_per_window,
                             xpos:xpos + nblocks_per_window].ravel()
            hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))

            xleft = xpos * pix_per_cell
            ytop = ypos * pix_per_cell

            # Extract the image patch
            subimg = cv2.resize(
                ctrans_tosearch[ytop:ytop + window, xleft:xleft + window],
                (64, 64))
            # Extract all features for this window (note: single_img_features()
            # recomputes HOG on the resized patch, so the sub-sampled
            # hog_features gathered above go unused)
            features = utils.single_img_features(subimg,
                                                 color_space=color_space,
                                                 spatial_size=spatial_size,
                                                 hist_bins=hist_bins,
                                                 orient=orient,
                                                 pix_per_cell=pix_per_cell,
                                                 cell_per_block=cell_per_block,
                                                 hog_channel=hog_channel,
                                                 spatial_feat=spatial_feat,
                                                 hist_feat=hist_feat,
                                                 hog_feat=hog_feat)

            # Scale the extracted features and make a prediction
            X = scaler.transform(np.array(features).reshape(1, -1))
            prediction = clf.predict(X)

            if prediction == 1:
                xbox_left = int(xleft * scale)
                ytop_draw = int(ytop * scale)
                win_draw = int(window * scale)
                cv2.rectangle(draw_img,
                              (xbox_left, ytop_draw + ystart),
                              (xbox_left + win_draw,
                               ytop_draw + win_draw + ystart),
                              (0, 0, 255), 6)

    return draw_img
Example #11
def find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins):
    draw_img = np.copy(img)
    img = img.astype(np.float32) / 255

    img_tosearch = img[ystart:ystop, :, :]
    ctrans_tosearch = utils.convert_color(img_tosearch, conv='RGB2YCrCb')
    if scale != 1:
        imshape = ctrans_tosearch.shape
        ctrans_tosearch = cv2.resize(ctrans_tosearch, (int(imshape[1] / scale), int(imshape[0] / scale)))

    ch1 = ctrans_tosearch[:, :, 0]
    ch2 = ctrans_tosearch[:, :, 1]
    ch3 = ctrans_tosearch[:, :, 2]

    # Define blocks and steps as above
    nxblocks = (ch1.shape[1] // pix_per_cell) - cell_per_block + 1
    nyblocks = (ch1.shape[0] // pix_per_cell) - cell_per_block + 1
    nfeat_per_block = orient * cell_per_block ** 2

    # 64 was the original sampling rate, with 8 cells and 8 pix per cell
    window = 64
    nblocks_per_window = (window // pix_per_cell) - cell_per_block + 1
    cells_per_step = 2  # Instead of overlap, define how many cells to step
    nxsteps = (nxblocks - nblocks_per_window) // cells_per_step
    nysteps = (nyblocks - nblocks_per_window) // cells_per_step

    # Compute individual channel HOG features for the entire image
    hog1 = utils.get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
    hog2 = utils.get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
    hog3 = utils.get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)

    for xb in range(nxsteps):
        for yb in range(nysteps):
            ypos = yb * cells_per_step
            xpos = xb * cells_per_step
            # Extract HOG for this patch
            hog_feat1 = hog1[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()
            hog_feat2 = hog2[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()
            hog_feat3 = hog3[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()
            hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))

            xleft = xpos * pix_per_cell
            ytop = ypos * pix_per_cell

            # Extract the image patch
            subimg = cv2.resize(ctrans_tosearch[ytop:ytop + window, xleft:xleft + window], (64, 64))

            # Get color features
            spatial_features = utils.bin_spatial(subimg, size=spatial_size)
            hist_features = utils.color_hist(subimg, nbins=hist_bins)
            # Stack the features into a single row vector, scale, and predict
            X = np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1)
            test_features = X_scaler.transform(X)
            test_prediction = svc.predict(test_features)

            if test_prediction == 1:
                xbox_left = int(xleft * scale)
                ytop_draw = int(ytop * scale)
                win_draw = int(window * scale)
                cv2.rectangle(draw_img, (xbox_left, ytop_draw + ystart),
                              (xbox_left + win_draw, ytop_draw + win_draw + ystart), (0, 0, 255), 6)

    return draw_img
Example #12
              [416, 480, 1.0],
              [400, 480, 1.25],
              [424, 504, 1.25],
              [400, 496, 1.5],
              [432, 528, 1.5],
              [400, 512, 1.75],
              [432, 544, 1.75],
              [400, 528, 2.0],
              [432, 560, 2.0],
              [400, 596, 3.5],
              [464, 660, 3.5]]

for i, img in enumerate([car_example, notcar_example]):
    plt.imsave(r"./output_images/" + str(i) + "_1_spatial.jpg",
               cv2.resize(cv2.resize(img, spatial_size), (128, 128)))
    img2 = convert_color(img, cspace=cspace)
    ch1 = img2[:, :, 0]
    ch2 = img2[:, :, 1]
    ch3 = img2[:, :, 2]
    plt.imsave(r"./output_images/" + str(i) + "_2_ch1.jpg", ch1, cmap='gray')
    plt.imsave(r"./output_images/" + str(i) + "_2_ch2.jpg", ch2, cmap='gray')
    plt.imsave(r"./output_images/" + str(i) + "_2_ch3.jpg", ch3, cmap='gray')
    # HOG expects a single channel; using the first converted channel here
    hog_feats, hog_im = get_hog_features(ch1, orient, pix_per_cell, cell_per_block,
                                         vis=True, feature_vec=True)
    plt.imsave(r"./output_images/" + str(i) + "_3_hog.jpg", hog_im, cmap='gray')


for fname in images:
    print('processing ', fname, '...')
    img = mpimg.imread(fname)
    boxes = find_cars_multiscale(img, multiscale, clf, X_scaler, cspace, spatial_size,
Example #13
# idx = list(r['class_ids']).index(2)
images = []
idx_s = []
while True:
    idx = input()
    if idx == 'q':
        break
    idx = int(idx)
    idx_s.append(idx)
    res = crop_by_id(img, masks, idx)

    # display(res)
    final_image = img.copy()

    final_img = convert_color(img, res, rgb)
    images.append(final_img)
    print('enter q to break, else enter index \n')
'''
final_image = img.copy()
final_image[:,:,0] = np.where(masks[:,:,4], np.zeros((512,512)), final_image[:,:,0])
final_image[:,:,1] = np.where(masks[:,:,4], np.zeros((512,512)), final_image[:,:,1])
final_image[:,:,2] = np.where(masks[:,:,4], np.zeros((512,512)), final_image[:,:,2])

display(final_image)
'''

l = len(images)

if l == 1:
    for idx, i in enumerate(images):
Example #14
def draw_line(self, p1, p2, color, thickness=1):
    color = utils.convert_color(color)
    # pygame.draw.line takes the target surface first; assuming self._pixels is
    # the Surface here, as in the fill() example above
    pygame.draw.line(self._pixels, color, p1, p2, thickness)
Example #15
    label_descriptions = json.load(f)

vid = cv2.VideoCapture(0)
vid.set(cv2.CAP_PROP_FRAME_WIDTH, 1200)
vid.set(cv2.CAP_PROP_FRAME_HEIGHT, 800)

while True:

    ret, frame = vid.read()

    try:
        img, r = masker_np(frame)
        masks, rois = convert_to_mask(img, r)
        idx = list(r['class_ids']).index(2)
        res = crop_by_id(img, masks, idx)
        final_img = convert_color(img, res, [244, 196, 48])
        img[:, :, 0] = np.where(masks[:, :, idx], np.zeros((512, 512)),
                                img[:, :, 0])
        img[:, :, 1] = np.where(masks[:, :, idx], np.zeros((512, 512)),
                                img[:, :, 1])
        img[:, :, 2] = np.where(masks[:, :, idx], np.zeros((512, 512)),
                                img[:, :, 2])
        final_image = cv2.addWeighted(img, 1, final_img, 1, 0)
        cv2.imshow('frame', cv2.resize(final_image, (1200, 800)))
    except Exception:
        cv2.imshow('frame', cv2.resize(frame, (1200, 800)))

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# After the loop release the cap object
vid.release()
Example #16
def find_cars(img, scaler, model):
    """
    This function searches for vehicles in an image with a pre-trained 'model' 
    and 'scaler'. It defines an image region of interest, search windows
    at different scale and returns all search windows with a positive detection.
    :param img: Image to search.
    :param scaler: Per-column scaler fitted to feature vectors
    :param model: Pre-trained linear SVC model.
    :return: List of search windows with detected vehicles.
    """
    # Set constants
    colorspace = c.CSPACE  # Color space
    orient = c.ORIENT  # Gradient orientation
    pix_per_cell = c.PPC  # Pixels per cell
    cell_per_block = c.CPB  # Cells per block
    hog_channel = c.HCHNL  # Hog channel
    spatial_size = c.SPATIAL_SIZE
    hist_bins = c.HIST_BINS
    # Copy image
    draw_img = np.copy(img)
    search_img = convert_color(img, conv=c.CSPACE)
    # Define scales for search windows
    scales = [1.0, 1.5, 2.0, 2.5, 3.5]
    detections = []
    # Search image with search windows at different scale
    for scale in scales:
        # Get search windows for scale
        window_list = slide_window(
            img,
            x_start_stop=[None, None],
            y_start_stop=[c.YSTART, c.YSTART + (c.YSTOP - c.YSTART) * scale],
            xy_window=(int(64 * scale), int(64 * scale)),
            xy_overlap=(0.75, 0.75))
        # Search windows for cars
        for window in window_list:
            # Extract window segment from image and convert color space
            segment_tosearch = search_img[window[0][1]:window[1][1],
                                          window[0][0]:window[1][0], :]
            # Resize segment if scale is not 1
            if scale != 1:
                imshape = segment_tosearch.shape
                segment_tosearch = cv2.resize(
                    segment_tosearch,
                    (int(imshape[1] / scale), int(imshape[0] / scale)))
            # Get color channels
            ch1 = segment_tosearch[:, :, 0]
            ch2 = segment_tosearch[:, :, 1]
            ch3 = segment_tosearch[:, :, 2]
            # Compute per-channel HOG features for this window segment
            hog1 = get_hog_features(ch1,
                                    orient,
                                    pix_per_cell,
                                    cell_per_block,
                                    feature_vec=False).ravel()
            hog2 = get_hog_features(ch2,
                                    orient,
                                    pix_per_cell,
                                    cell_per_block,
                                    feature_vec=False).ravel()
            hog3 = get_hog_features(ch3,
                                    orient,
                                    pix_per_cell,
                                    cell_per_block,
                                    feature_vec=False).ravel()
            # Stack HOG features for this patch
            hog_features = np.hstack((hog1, hog2, hog3))
            # Get color features
            # spatial_features = get_color_bin_features(segment_tosearch)
            # hist_features = get_color_hist_features(segment_tosearch)
            # # Stack all features
            # test_features = np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1)
            test_features = hog_features.reshape(1, -1)
            # Scale features and make a prediction
            test_features = scaler.transform(test_features)
            test_prediction = model.predict(test_features)
            # If vehicle detected, add to detections
            if test_prediction == 1:
                detections.append(window)

    return detections
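
A hypothetical single-frame usage, drawing each returned window on the frame:

detections = find_cars(frame, scaler, model)
for (x1, y1), (x2, y2) in detections:
    cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 4)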
Example #17
def extract_features(imgs,
                     cspace=c.CSPACE,
                     spatial_size=(32, 32),
                     hist_bins=c.HIST_BINS,
                     orient=c.ORIENT,
                     pix_per_cell=c.PPC,
                     cell_per_block=c.CPB,
                     hog_channel='ALL',
                     spatial_feat=False,
                     hist_feat=False,
                     hog_feat=True):
    # Create a list to append feature vectors to
    features = []
    # Iterate through the list of images
    for file in imgs:
        print(file)
        file_features = []
        # Read in each one by one
        image = cv2.imread(file)
        # image = mpimg.imread(file)
        # image = image*255
        # Convert color space of image
        feature_image = convert_color(image, conv=cspace)
        # Get color spacial features
        if spatial_feat:
            spatial_features = get_color_bin_features(feature_image,
                                                      size=spatial_size)
            file_features.append(spatial_features)
        # Get color histogram features
        if hist_feat:
            # Apply get_color_hist_features()
            hist_features = get_color_hist_features(feature_image,
                                                    nbins=hist_bins)
            file_features.append(hist_features)
        # Get hog features for each channel
        if hog_feat:
            if hog_channel == 'ALL':
                hog_features = []
                for channel in range(feature_image.shape[2]):
                    hog_features.append(
                        get_hog_features(feature_image[:, :, channel],
                                         orient,
                                         pix_per_cell,
                                         cell_per_block,
                                         vis=False,
                                         feature_vec=True))
                hog_features = np.ravel(hog_features)
            else:
                hog_features = get_hog_features(feature_image[:, :,
                                                              hog_channel],
                                                orient,
                                                pix_per_cell,
                                                cell_per_block,
                                                vis=False,
                                                feature_vec=True)
            # Add hog features to image features
            file_features.append(hog_features)
        # Add image features to overall features list
        features.append(np.concatenate(file_features))

    return features
Example #18
    label_descriptions = json.load(f)

vid = cv2.VideoCapture(0)
vid.set(cv2.CAP_PROP_FRAME_WIDTH, 512)
vid.set(cv2.CAP_PROP_FRAME_HEIGHT, 512)

while True:

    ret, frame = vid.read()

    try:
        img, r = masker_np(frame)
        masks, rois = convert_to_mask(img, r)
        idx = list(r['class_ids']).index(2)
        res = crop_by_id(img, masks, idx)
        final_img = convert_color(img, res, [255, 105, 180])
        img[:, :, 0] = np.where(masks[:, :, idx], np.zeros((512, 512)),
                                img[:, :, 0])
        img[:, :, 1] = np.where(masks[:, :, idx], np.zeros((512, 512)),
                                img[:, :, 1])
        img[:, :, 2] = np.where(masks[:, :, idx], np.zeros((512, 512)),
                                img[:, :, 2])
        final_image = cv2.addWeighted(img, 1, final_img, 1, 0)
        cv2.imshow('frame', final_image)
    except Exception:
        cv2.imshow('frame', cv2.resize(frame, (512, 512)))

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# After the loop release the cap object
vid.release()
Example #19
    def search_cars_with_option(self, img, scale, cells_per_step, ystart, ystop, conf_thresh):
        """
        Detects car bounding boxes at the given scale in the region img[ystart:ystop, :, :]
        :param img: input image
        :param scale: window scale.
        :param cells_per_step: cells per step.
        :param ystart: y-range start.
        :param ystop: y-range stop.
        :param conf_thresh: classifier confidence threshold.
        :return: list of (bbox, confidence)
        """
        cvt_img = convert_color(img, self.P.color_space)

        # Crop the image to the requested y-region
        ystart = 0 if ystart is None else ystart
        ystop = img.shape[0] if ystop is None else ystop
        cvt_img = cvt_img[ystart:ystop, :, :]

        # Scale the image.
        if scale != 1:
            cvt_img = cv2.resize(cvt_img, (int(cvt_img.shape[1] / scale), int(cvt_img.shape[0] / scale)))

        # Define blocks and steps as above
        nxblocks = (cvt_img.shape[1] // self.P.pix_per_cell) - self.P.cell_per_block + 1
        nyblocks = (cvt_img.shape[0] // self.P.pix_per_cell) - self.P.cell_per_block + 1
        nblocks_per_window = (self.P.window_size[0] // self.P.pix_per_cell) - self.P.cell_per_block + 1
        nxsteps = (nxblocks - nblocks_per_window) // cells_per_step + 1
        nysteps = (nyblocks - nblocks_per_window) // cells_per_step + 1

        # Compute individual channel HOG features for the entire image
        hogs = []
        for ch in range(cvt_img.shape[2]):
            hogs.append(get_channel_hog_features(
                img=cvt_img[:,:,ch], orient=self.P.orient,
                pix_per_cell=self.P.pix_per_cell, cell_per_block=self.P.cell_per_block,
                feature_vec=False, vis=False))

        bbox_confs = []
        for xb in range(nxsteps):
            for yb in range(nysteps):
                ypos = yb * cells_per_step
                xpos = xb * cells_per_step

                hog_features = []
                for ch in range(cvt_img.shape[2]):
                    hog_features.append(
                        hogs[ch][ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel())
                hog_features = np.hstack(hog_features)

                # Extract the image patch
                xleft = xpos * self.P.pix_per_cell
                ytop = ypos * self.P.pix_per_cell
                subimg = cv2.resize(cvt_img[ytop:ytop + self.P.window_size[0],
                                    xleft:xleft + self.P.window_size[0]],
                                    self.P.window_size)
                # Get spatial features
                spatial_features = get_spatial_features(subimg, self.P.spatial_size)
                # Get color features
                color_features = get_color_features(subimg, size=self.P.window_size, nbins=self.P.color_nbins)
                window_features = self.scaler.transform(np.hstack(
                    (spatial_features, color_features, hog_features)).reshape(1, -1))

                if self.clf.predict(window_features) == 1:
                    xbox_left = int(xleft * scale)
                    ytop_draw = int(ytop * scale)
                    box_draw = int(self.P.window_size[0] * scale)
                    confidence = self.clf.decision_function(window_features)[0]
                    if confidence < conf_thresh:
                        # Only consider window with confidence score >= threshold.
                        continue

                    bbox = [(xbox_left, ytop_draw+ystart), (xbox_left+box_draw,ytop_draw+ystart+box_draw)]
                    bbox_conf = (bbox, confidence)
                    bbox_confs.append(bbox_conf)
        return bbox_confs
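
A hypothetical multi-scale driver for this method; detector, the scale list, y-ranges, and confidence threshold are illustrative only:

bbox_confs = []
for scale, ystart, ystop in [(1.0, 400, 496), (1.5, 400, 560), (2.0, 400, 656)]:
    bbox_confs += detector.search_cars_with_option(
        frame, scale=scale, cells_per_step=2,
        ystart=ystart, ystop=ystop, conf_thresh=0.4)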