Code Example #1
def main():
    # parameter
    directory = 'D:/USER/_PROJECT_/_PRJ05_/_1_WIP/_1_forge/_v0_/'
    args = PARSE_ARGS(path=directory)
    var = parameters()

    # fit a per_column scaler, apply the scaler to X, use a linear SVC
    X_scaler, scaled_X, svc = classifier(args, var, to_print=True)
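classifier() is not shown in these snippets. Below is a minimal sketch of what the comment above says it does (fit a per-column scaler, apply it to X, train a linear SVC); extract_features() is a hypothetical helper and the train/test split is illustrative.

import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
from sklearn.model_selection import train_test_split

def classifier_sketch(args, var, to_print=False):
    # gather file lists and per-image feature vectors (extract_features is hypothetical)
    cars, notcars = list_all_images(args)
    car_features = extract_features(cars, var)
    notcar_features = extract_features(notcars, var)
    # stack features, fit a per-column scaler, and apply it to X
    X = np.vstack((car_features, notcar_features)).astype(np.float64)
    X_scaler = StandardScaler().fit(X)
    scaled_X = X_scaler.transform(X)
    # label cars as 1 and non-cars as 0, then train a linear SVC
    y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
    X_train, X_test, y_train, y_test = train_test_split(scaled_X, y, test_size=0.2)
    svc = LinearSVC()
    svc.fit(X_train, y_train)
    if to_print:
        print('Test accuracy: {:.4f}'.format(svc.score(X_test, y_test)))
    return X_scaler, scaled_X, svc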
Code Example #2
def main():
    # parameter
    directory = 'D:/USER/_PROJECT_/_PRJ04_/_1_WIP/_1_forge/_5_v4/'
    args = PARSE_ARGS(path=directory)
    # read in an image:
    image_to_read = [args.test + 'straight_lines1.jpg', args.test + 'straight_lines2.jpg']
    image = mpimg.imread(image_to_read[0])
    # test of different source coordinates
    test_birds_eye_view1(args, image)
    # test of different image input
    test_birds_eye_view2(args, image_to_read)
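test_birds_eye_view1/2 are not defined here; they are assumed to exercise a perspective warp to a top-down view. A minimal sketch of that transform, with placeholder source/destination coordinates rather than the project's tuned values:

import cv2
import numpy as np

def birds_eye_view_sketch(image):
    # warp an undistorted road image to a top-down ("bird's-eye") view
    h, w = image.shape[:2]
    # placeholder trapezoid on the road and its rectangular destination
    src = np.float32([[580, 460], [700, 460], [1040, 680], [260, 680]])
    dst = np.float32([[260, 0], [1040, 0], [1040, 720], [260, 720]])
    M = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(image, M, (w, h), flags=cv2.INTER_LINEAR)
    return warped, M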
Code Example #3
def main():
    # parameter
    directory = 'D:/USER/_PROJECT_/_PRJ05_/_1_WIP/_1_forge/_v0_/'
    args = PARSE_ARGS(path=directory)

    t = time.time()
    # list all images in Vehicle and Non-vehicle folders
    cars, notcars = list_all_images(args, to_print=True)
    print()
    print('cars   : {}'.format(cars[0].split('\\')[-1]))
    print('notcars: {}'.format(notcars[0].split('\\')[-1]))
    print(time.time() - t, 'seconds to run')
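list_all_images() is assumed to glob the vehicle and non-vehicle image folders; a minimal sketch, where the args.vehicles / args.non_vehicles attributes and the .png extension are assumptions about PARSE_ARGS and the dataset layout:

import glob

def list_all_images_sketch(args, to_print=False):
    # recursively collect every image under the two (assumed) dataset folders
    cars = glob.glob(args.vehicles + '**/*.png', recursive=True)
    notcars = glob.glob(args.non_vehicles + '**/*.png', recursive=True)
    if to_print:
        print('cars: {}, notcars: {}'.format(len(cars), len(notcars)))
    return cars, notcars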
Code Example #4
def main():
    # parameter
    directory = 'D:/USER/_PROJECT_/_PRJ05_/_1_WIP/_1_forge/_v0_/'
    args = PARSE_ARGS(path=directory)

    test_output = args.path + 'test.mp4'
    clip = VideoFileClip('project_video.mp4')
    test_clip = clip.fl_image(process_image)
    test_clip.write_videofile(test_output, audio=False)

    # track detected vehicles across frames
    carslist = []
    carslist.append(VEHICLE())
    # sanity-check the pipeline on a single frame pulled from the clip
    img = clip.get_frame(0)
    process_image(img)
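process_image() is not defined in this snippet. fl_image expects a function that takes one RGB frame (a NumPy array) and returns a frame of the same shape; a minimal sketch of that contract, assuming a find_cars()-style detector (see Code Example #8) that returns the annotated frame:

def process_image_sketch(frame):
    # run the single-frame detection pipeline and return the drawn result
    return find_cars(args, var, frame)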
Code Example #5
def main():
    '''
    LAYOUT: camera calibration
     . calculate mtx, dist
     . save it on the hard drive (pickle)
     . plot images
    '''
    # parameter
    directory = 'D:/USER/_PROJECT_/_PRJ04_/_1_WIP/_1_forge/_5_v4/'
    args = PARSE_ARGS(path=directory)
    # return the camera matrix, distortion coefficients
    camera_dictionary = camera_calibrate(args)
    # save the camera calibration result
    pickle.dump(camera_dictionary, open(args.cali + 'calibration.p', 'wb'))
    # plot the camera calibration result
    images_plot(args, camera_dictionary)
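camera_calibrate() is assumed to follow the standard OpenCV chessboard workflow; a minimal sketch, where the 9x6 corner pattern and the calibration*.jpg naming under args.cali are assumptions:

import glob
import cv2
import numpy as np

def camera_calibrate_sketch(args, nx=9, ny=6):
    # object points for one chessboard: (0,0,0), (1,0,0), ..., (nx-1, ny-1, 0)
    objp = np.zeros((ny * nx, 3), np.float32)
    objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
    objpoints, imgpoints = [], []
    for fname in glob.glob(args.cali + 'calibration*.jpg'):
        gray = cv2.cvtColor(cv2.imread(fname), cv2.COLOR_BGR2GRAY)
        found, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        if found:
            objpoints.append(objp)
            imgpoints.append(corners)
    # compute the camera matrix (mtx) and distortion coefficients (dist)
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, gray.shape[::-1], None, None)
    return {'mtx': mtx, 'dist': dist}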
Code Example #6
def main():
    # parameter
    directory = 'D:/USER/_PROJECT_/_PRJ05_/_1_WIP/_1_forge/_v0_/'
    args = PARSE_ARGS(path=directory)
    var = parameters()

    # set variables
    y_start_stop = var['y_start_stop']  # min/max y to search in slide_window(), e.g. [400, None], [400, 656] or [None, None]
    xy_window = var['xy_window']  # e.g. (128, 128), (96, 96) or (64, 64)
    overlap = var['overlap']  # e.g. 0.5

    X_scaler, scaled_X, svc = classifier(args, var, to_print=False)

    # list images to read/open
    example_images = glob.glob(args.test + '*.jpg')
    images, titles = [], []

    for count, img_src in enumerate(example_images):
        t1 = time.time()
        img = mpimg.imread(img_src)
        draw_img = np.copy(img)
        img = img.astype(np.float32) / 255
        if count % int(len(example_images) / 5) == 0:
            print('img[min: {:.2f}, max: {:.2f}]|'.format(
                np.min(img), np.max(img)),
                  end=' ')

        windows = slide_window(img,
                               x_start_stop=[None, None],
                               y_start_stop=y_start_stop,
                               xy_window=xy_window,
                               xy_overlap=(overlap, overlap))

        if count % int(len(example_images) / 5) == 0:
            print('num windows: {}|'.format(len(windows)), end=' ')

        hot_windows = search_windows(img,
                                     windows,
                                     svc,
                                     X_scaler,
                                     color_space=var['color_space'],
                                     spatial_size=var['spatial_size'],
                                     hist_bins=var['hist_bins'],
                                     orient=var['orient'],
                                     pix_per_cell=var['pix_per_cell'],
                                     cell_per_block=var['cell_per_block'],
                                     hog_channel=var['hog_channel'],
                                     spatial_feat=var['spatial_feat'],
                                     hist_feat=var['hist_feat'],
                                     hog_feat=var['hog_feat'])

        window_img = draw_boxes(draw_img,
                                hot_windows,
                                color=(0, 0, 255),
                                thick=6)
        images.append(window_img)
        titles.append('')
        if count % int(len(example_images) / 5) == 0:
            print('{:.3f} s to process 1 img searching {} windows'.format(
                time.time() - t1, len(windows)))
    # fig = plt.figure(figsize=(12,18), dpi=300)
    figsize = (15, 7)
    visualize(figsize, 3, images, titles)
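draw_boxes() as called above is assumed to be the usual bounding-box helper; a minimal sketch:

import cv2
import numpy as np

def draw_boxes_sketch(img, bboxes, color=(0, 0, 255), thick=6):
    # draw each ((x1, y1), (x2, y2)) window on a copy of the image
    imcopy = np.copy(img)
    for (p1, p2) in bboxes:
        cv2.rectangle(imcopy, tuple(p1), tuple(p2), color, thick)
    return imcopy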
Code Example #7
def main():
    # parameter
    directory = 'D:/USER/_PROJECT_/_PRJ05_/_1_WIP/_1_forge/_v0_/'
    args = PARSE_ARGS(path=directory)
    var = parameters()

    X_scaler, scaled_X, svc = classifier(args, var, to_print=False)

    # set variables
    pix_per_cell = var['pix_per_cell']
    orient = var['orient']
    cell_per_block = var['cell_per_block']
    spatial_size = var['spatial_size']
    hist_bins = var['hist_bins']

    # list images to read/open
    example_images = glob.glob(args.test + '*.jpg')

    out_images, out_maps, out_titles, out_boxes = [], [], [], []

    # consider a narrower swath in y
    ystart, ystop, scale = 400, 656, 1.5  # alternative scale values: 1, 2
    # iterate over the test images
    for img_src in example_images:
        img_boxes = []
        t = time.time()
        count = 0
        img = mpimg.imread(img_src)
        draw_img = np.copy(img)
        # make a heatmap of zeros
        heatmap = np.zeros_like(img[:, :, 0])
        img = img.astype(np.float32) / 255

        img_tosearch = img[ystart:ystop, :, :]
        ctrans_tosearch = convert_color(img_tosearch, conv='RGB2YCrCb')
        if scale != 1:
            imshape = ctrans_tosearch.shape
            ctrans_tosearch = cv2.resize(
                ctrans_tosearch,
                (int(imshape[1] / scale), int(imshape[0] / scale)))

        (ch1, ch2, ch3) = [ctrans_tosearch[:, :, i] for i in range(3)]

        # define blocks and steps as above
        (nyblocks, nxblocks) = [(ch1.shape[i] // pix_per_cell) - 1
                                for i in range(2)]
        nfeat_per_block = orient * cell_per_block**2
        window, cells_per_step = 64, 2  # instead of overlap, define how many cells to step
        nblocks_per_window = (window // pix_per_cell) - 1
        (nysteps, nxsteps) = [(i - nblocks_per_window) // cells_per_step
                              for i in [nyblocks, nxblocks]]

        # compute individual channel HOG features for the entire image
        (hog1, hog2, hog3) = [
            get_hog_features(i,
                             orient,
                             pix_per_cell,
                             cell_per_block,
                             feature_vec=False) for i in [ch1, ch2, ch3]
        ]

        for xb in range(nxsteps):
            for yb in range(nysteps):
                count += 1
                (ypos, xpos) = [i * cells_per_step for i in [yb, xb]]
                # extract HOG for this patch
                (hog_feat1, hog_feat2, hog_feat3) = [
                    i[ypos:ypos + nblocks_per_window,
                      xpos:xpos + nblocks_per_window].ravel()
                    for i in [hog1, hog2, hog3]
                ]
                hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))

                (ytop, xleft) = [i * pix_per_cell for i in [ypos, xpos]]

                # extract the image patch
                subimg = cv2.resize(
                    ctrans_tosearch[ytop:ytop + window, xleft:xleft + window],
                    (64, 64))

                # get color features
                spatial_features = bin_spatial(subimg, size=spatial_size)
                hist_features = color_hist(subimg, nbins=hist_bins)

                # scale features and make a prediction (per window, inside the yb loop)
                test_features = X_scaler.transform(
                    np.hstack((spatial_features, hist_features,
                               hog_features)).reshape(1, -1))
                test_prediction = svc.predict(test_features)

                if test_prediction == 1:
                    (xbox_left, ytop_draw, win_draw) = [
                        int(i * scale) for i in [xleft, ytop, window]
                    ]
                    cv2.rectangle(
                        draw_img, (xbox_left, ytop_draw + ystart),
                        (xbox_left + win_draw, ytop_draw + win_draw + ystart),
                        (0, 0, 255))
                    img_boxes.append(
                        ((xbox_left, ytop_draw + ystart),
                         (xbox_left + win_draw, ytop_draw + win_draw + ystart)))
                    heatmap[ytop_draw + ystart:ytop_draw + win_draw + ystart,
                            xbox_left:xbox_left + win_draw] += 1

        print(time.time() - t, 'seconds to run, total windows = ', count)

        out_images.append(draw_img)
        out_titles.append(img_src[-12:])

        out_images.append(heatmap)
        out_titles.append(img_src[-12:])
        out_maps.append(heatmap)
        out_boxes.append(img_boxes)

    figsize = (15, 7)  # alternative: (12, 24)
    visualize(figsize, 3, out_images, out_titles)
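The per-image heatmaps accumulated above are typically thresholded and labeled so that overlapping detections merge into one box per vehicle; a minimal sketch using scipy.ndimage.label (imported in Code Example #8), with an illustrative threshold:

import numpy as np
from scipy.ndimage import label

def heat_to_boxes_sketch(heatmap, threshold=1):
    heat = np.copy(heatmap)
    heat[heat <= threshold] = 0            # reject weak, likely false detections
    labels, n_cars = label(heat)           # connected regions = candidate vehicles
    boxes = []
    for car_number in range(1, n_cars + 1):
        ys, xs = (labels == car_number).nonzero()
        boxes.append(((xs.min(), ys.min()), (xs.max(), ys.max())))
    return boxes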
Code Example #8
import numpy as np
import cv2
import time
from skimage.feature import hog
from sklearn.svm import LinearSVC
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split  # sklearn >= 0.18
# from sklearn.cross_validation import train_test_split
from scipy.ndimage import label  # scipy.ndimage.measurements is deprecated

from moviepy.editor import VideoFileClip
from IPython.display import HTML

# parameter
directory = 'D:/USER/_PROJECT_/_PRJ05_/_1_WIP/_1_forge/_v0_/'
args = PARSE_ARGS(path=directory)
var = parameters()


# Define a single function that can extract features using HOG sub-sampling and make predictions
def find_cars(args, var, img):
    # set variables
    X_scaler, _, svc = classifier(args, var, to_print=False)
    cell_per_block = var['cell_per_block']
    hist_bins = var['hist_bins']
    hog_channel = var['hog_channel']
    orient = var['orient']
    pix_per_cell = var['pix_per_cell']
    scale = var['scale']
    spatial_size = var['spatial_size']
    ystart = var['y_start_stop'][0]
Code Example #9
def main():
    # parameter
    directory = 'D:/USER/_PROJECT_/_PRJ04_/_1_WIP/_1_forge/_3_retro/'
    args = PARSE_ARGS(path=directory)
    # choose a Sobel kernel size
    ksize = 3
    # read images
    image = mpimg.imread(args.sand + 'signs_vehicles_xygrad.jpg')
    img_solution = mpimg.imread(args.sand + 'binary-combo-example.jpg')
    # apply each of the thresholding functions
    gradx = gradient_sobel_abs(image,
                               orient='x',
                               sobel_kernel=ksize,
                               thresh=(20, 100))
    grady = gradient_sobel_abs(image,
                               orient='y',
                               sobel_kernel=ksize,
                               thresh=(20, 100))
    mag_binary = gradient_magnitude(image,
                                    sobel_kernel=ksize,
                                    thresh=(20, 100))
    dir_binary = gradient_direction(image, sobel_kernel=15, thresh=(0.7, 1.3))

    # combine thresholds
    combined1, combined2, combined3, combined4, combined5, combined6, combined7 = [
        np.zeros_like(dir_binary) for i in range(7)
    ]
    combined1[((gradx == 1) & (grady == 1))] = 1
    combined2[((mag_binary == 1) & (dir_binary == 1))] = 1
    combined3[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) &
                                               (dir_binary == 1))] = 1
    combined4[((gradx == 1) & (grady == 1) & (mag_binary == 1))] = 1
    combined5[((gradx == 1) & (grady == 1) | (mag_binary == 1))] = 1
    combined6[((gradx == 1) & (grady == 1) & (dir_binary == 1))] = 1
    combined7[((gradx == 1) & (grady == 1) | (dir_binary == 1))] = 1

    # plot the result
    row, column = [12, 2]
    figure, axes = plt.subplots(row, column, figsize=(15, 50))
    figure.tight_layout()
    expected_result = ['Expected result', img_solution]
    list_title_image = [['Original Image', image], ['gradx', gradx],
                        ['grady', grady], ['mag_binary', mag_binary],
                        ['dir_binary', dir_binary],
                        ['comb1: gradx & grady', combined1],
                        ['comb2: mag_binary & dir_binary', combined2],
                        [
                            'comb3: <gradx & grady> OR <dir_bin & mag_bin> ',
                            combined3
                        ], ['comb4: gradx & grady & mag_binary', combined4],
                        ['comb5: gradx & grady | mag_binary', combined5],
                        ['comb6: gradx & grady & dir_binary', combined6],
                        ['comb7: gradx & grady | dir_binary', combined7]]

    count = 0
    for i, ax in enumerate(axes.flatten()):
        if i % 2 == 0:
            ax.imshow(expected_result[1], cmap='gray')
            ax.set_title(expected_result[0], fontsize=15)
        else:
            ax.imshow(list_title_image[count][1], cmap='gray')
            ax.set_title(list_title_image[count][0], fontsize=15)
            count += 1
        ax.axis('off')
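gradient_sobel_abs() as called above is assumed to be the standard absolute-Sobel threshold; a minimal sketch (gradient_magnitude and gradient_direction follow the same pattern using the gradient magnitude and np.arctan2 of the two derivatives):

import cv2
import numpy as np

def gradient_sobel_abs_sketch(img, orient='x', sobel_kernel=3, thresh=(20, 100)):
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    dx, dy = (1, 0) if orient == 'x' else (0, 1)
    sobel = cv2.Sobel(gray, cv2.CV_64F, dx, dy, ksize=sobel_kernel)
    # scale to 8 bits, then keep pixels whose gradient falls inside the thresholds
    scaled = np.uint8(255 * np.absolute(sobel) / np.max(np.absolute(sobel)))
    binary = np.zeros_like(scaled)
    binary[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1
    return binary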
Code Example #10
def main():
    # parameter
    directory = 'D:/USER/_PROJECT_/_PRJ05_/_1_WIP/_1_forge/_v0_/'
    args = PARSE_ARGS(path=directory)

    # list_all_images
    cars, notcars = list_all_images(args)

    # choose random car/notcar indices
    car_ind = np.random.randint(0, len(cars))
    notcar_ind = np.random.randint(0, len(notcars))

    # read in car / notcar images
    car_image = mpimg.imread(cars[car_ind])
    notcar_image = mpimg.imread(notcars[notcar_ind])

    # define feature parameters
    color_space = 'RGB'  # can be RGB HSV LUV HLS YUV YCrCb
    orient = 6
    pix_per_cell = 8
    cell_per_block = 2
    hog_channel = 0  # can be 0 1 2 or 'ALL'
    spatial_size = (16, 16)  # spatial binning dimensions
    hist_bins = 16  # number of histogram bins
    spatial_feat = True  # spatial features on or off
    hist_feat = True  # histogram features on or off
    hog_feat = True  # HOG features on or off

    car_features, car_hog_image = single_img_features(
        car_image,
        color_space=color_space,
        spatial_size=spatial_size,
        hist_bins=hist_bins,
        orient=orient,
        pix_per_cell=pix_per_cell,
        cell_per_block=cell_per_block,
        hog_channel=hog_channel,
        spatial_feat=spatial_feat,
        hist_feat=hist_feat,
        hog_feat=hog_feat,
        vis=True)
    notcar_features, notcar_hog_image = single_img_features(
        notcar_image,
        color_space=color_space,
        spatial_size=spatial_size,
        hist_bins=hist_bins,
        orient=orient,
        pix_per_cell=pix_per_cell,
        cell_per_block=cell_per_block,
        hog_channel=hog_channel,
        spatial_feat=spatial_feat,
        hist_feat=hist_feat,
        hog_feat=hog_feat,
        vis=True)

    images = [car_image, car_hog_image, notcar_image, notcar_hog_image]
    titles = ['car image', 'car HOG image', 'notcar image', 'notcar HOG image']
    #figure = plt.figure(figsize=(12, 3))  # , dpi=80)
    #visualize(figure, 1, 4, images, titles)
    figsize = (12, 3)
    visualize(figsize, 4, images, titles)
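visualize() is not defined in these snippets; a minimal sketch assuming the signature used above is (figsize, columns, images, titles):

import numpy as np
import matplotlib.pyplot as plt

def visualize_sketch(figsize, cols, images, titles):
    rows = int(np.ceil(len(images) / cols))
    fig, axes = plt.subplots(rows, cols, figsize=figsize)
    for ax, img, title in zip(np.atleast_1d(axes).flatten(), images, titles):
        ax.imshow(img, cmap='gray' if img.ndim == 2 else None)
        ax.set_title(title)
        ax.axis('off')
    plt.show()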