# Imports assumed by this listing (the original excerpt omits them)
import glob
import os
import pickle
import time

import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np


def perform_distortion_correction(pickle_file, img_dir, img_files, out_dir):
    print_section_header("Correct Image Distortions")

    # Determine list of files to process
    if len(img_files) == 0:
        img_files = sorted(os.listdir(img_dir))
        img_files = [f for f in img_files if not f.startswith('.')]

    # Load camera calibration parameters
    with open(pickle_file, 'rb') as inf:
        camera_cal = pickle.load(inf)
    camera_mtx = camera_cal['camera_matrix']
    dist_coeffs = camera_cal['distortion_coefficients']

    for img_file in img_files:
        # Read an image file and correct distortions
        img_name = img_file.split('.')[0]
        img_path = img_dir + img_file
        img = mpimg.imread(img_path)
        img_undist = correct_image_distortion(img, camera_mtx, dist_coeffs)

        # Save output files
        outfile = out_dir + img_name + '.jpg'
        print("Store the undistorted image to {}".format(outfile))
        cv2.imwrite(outfile, cv2.cvtColor(img_undist, cv2.COLOR_RGB2BGR))
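

# --- Helper sketches (assumptions, not part of the original listing) ---
# print_section_header() and correct_image_distortion() are called above but
# defined elsewhere in the project. Minimal sketches of what they might look
# like; the real implementations may differ.
def print_section_header(title):
    # Print a simple banner so each pipeline stage is visible in the log
    print('\n' + '=' * 60)
    print(title)
    print('=' * 60)


def correct_image_distortion(img, camera_mtx, dist_coeffs):
    # cv2.undistort remaps the image using the pickled calibration
    # parameters; reusing the camera matrix preserves the frame size
    return cv2.undistort(img, camera_mtx, dist_coeffs, None, camera_mtx)
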
def perform_classifier_training(vehicle_img_dir,
                                non_vehicle_img_dir,
                                results_dir='./',
                                pickle_result=False,
                                cspace='YCrCb',
                                spatial_feat=True,
                                hist_feat=True,
                                hog_feat=True,
                                spatial_size=(32, 32),
                                hist_bins=32,
                                orient=9,
                                pix_per_cell=8,
                                cell_per_block=2,
                                hog_channel='ALL'):

    print_section_header("Train Classifier")
    X, y = extract_training_features(vehicle_img_dir, non_vehicle_img_dir,
                                     cspace, spatial_feat, hist_feat, hog_feat,
                                     spatial_size, hist_bins, orient,
                                     pix_per_cell, cell_per_block, hog_channel)
    classifier, feature_scaler, accuracy = train_classifier(X, y)

    trained_classifier = {
        'classifier': classifier,
        'feature_scaler': feature_scaler,
        'accuracy': accuracy,
        'cspace': cspace,
        'spatial_feat': spatial_feat,
        'hist_feat': hist_feat,
        'hog_feat': hog_feat,
        'spatial_size': spatial_size,
        'hist_bins': hist_bins,
        'orient': orient,
        'pix_per_cell': pix_per_cell,
        'cell_per_block': cell_per_block,
        'hog_channel': hog_channel
    }
    if pickle_result:
        suffix = '_' + cspace
        suffix += "_sp{}".format(spatial_size[0]) if spatial_feat else ''
        suffix += "_hist{}".format(hist_bins) if hist_feat else ''
        suffix += "_hog_{}_{}_{}_{}".format(orient, pix_per_cell,
                                            cell_per_block,
                                            hog_channel) if hog_feat else ''

        pickle_file = results_dir + 'classifier' + suffix + '.p'
        print("Pickle the trained classifier to {}".format(pickle_file))
        with open(pickle_file, 'wb') as out_file:
            pickle.dump(trained_classifier, out_file)
    return trained_classifier
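

# --- Helper sketch (an assumption, not part of the original listing) ---
# train_classifier() is referenced above but defined elsewhere. A minimal
# sketch assuming a scaled linear SVM, matching the
# (classifier, feature_scaler, accuracy) return signature used above.
def train_classifier(X, y):
    from sklearn.model_selection import train_test_split
    from sklearn.preprocessing import StandardScaler
    from sklearn.svm import LinearSVC

    # Normalize features so no single feature group dominates the SVM
    feature_scaler = StandardScaler().fit(X)
    X_scaled = feature_scaler.transform(X)
    # Hold out 20% of the samples to estimate accuracy
    X_train, X_test, y_train, y_test = train_test_split(X_scaled, y,
                                                        test_size=0.2)
    classifier = LinearSVC()
    classifier.fit(X_train, y_train)
    accuracy = classifier.score(X_test, y_test)
    return classifier, feature_scaler, accuracy
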
def determine_window_positions(img_dir, img_files, out_dir):
    print_section_header("Determine Slide Window Locations")

    # Determine list of files to process
    if len(img_files) == 0:
        img_files = sorted(os.listdir(img_dir))
        img_files = [f for f in img_files if not f.startswith('.')]

    # window_metrics = [(xy_window, x_start_stop, y_start_stop, overlap), ...]
    window_metrics = get_window_metrics()

    for img_file in img_files:
        # Read an image file
        img_name = img_file.split('.')[0]
        img_path = img_dir + img_file
        img = mpimg.imread(img_path)
        img = np.copy(img)

        fig, sub_plts = plt.subplots(2, 2, figsize=(18, 9))
        i_win = 0
        for win_size, x_start_stop, y_start_stop, overlap in window_metrics:
            xy_window = (win_size, win_size)
            window_list = find_slide_windows(img,
                                             x_start_stop=x_start_stop,
                                             y_start_stop=y_start_stop,
                                             xy_window=xy_window,
                                             xy_overlap=(overlap, overlap))
            img_boxed = draw_boxes(img,
                                   window_list,
                                   color=(0, 0, 255),
                                   thick=3)
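            # Highlight one sample window (the third) in red for contrast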
            img_boxed = draw_boxes(img_boxed,
                                   window_list[2:3],
                                   color=(255, 0, 0),
                                   thick=6)

            ax = sub_plts[i_win // 2, i_win % 2]
            ax.set_title("Window Size: {}".format(xy_window), fontsize=20)
            ax.imshow(img_boxed)
            i_win += 1
        # Save slide window plot
        fig.tight_layout()
        fig.subplots_adjust(left=0.02, right=0.99, top=0.95, bottom=0.03)
        out_file = "{}{}.jpg".format(out_dir, img_name)
        print("Store the image with slide-window markings to {}".format(
            out_file))
        fig.savefig(out_file)
        plt.close()
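

# --- Helper sketches (assumptions, not part of the original listing) ---
# get_window_metrics(), find_slide_windows(), and draw_boxes() are used above
# but defined elsewhere. Sketches of plausible implementations; the window
# metrics below are illustrative placeholders, not the author's tuned values.
def get_window_metrics():
    # (window size, x_start_stop, y_start_stop, overlap) per search scale;
    # four scales match the 2x2 subplot grid used above
    return [(64, (None, None), (400, 500), 0.75),
            (96, (None, None), (400, 550), 0.75),
            (128, (None, None), (400, 600), 0.75),
            (192, (None, None), (400, 680), 0.75)]


def find_slide_windows(img, x_start_stop=(None, None),
                       y_start_stop=(None, None), xy_window=(64, 64),
                       xy_overlap=(0.5, 0.5)):
    # Default the search region to the full image extent
    x_start = x_start_stop[0] if x_start_stop[0] is not None else 0
    x_stop = x_start_stop[1] if x_start_stop[1] is not None else img.shape[1]
    y_start = y_start_stop[0] if y_start_stop[0] is not None else 0
    y_stop = y_start_stop[1] if y_start_stop[1] is not None else img.shape[0]
    # Step size in pixels implied by the overlap fraction
    x_step = int(xy_window[0] * (1 - xy_overlap[0]))
    y_step = int(xy_window[1] * (1 - xy_overlap[1]))
    window_list = []
    for y in range(y_start, y_stop - xy_window[1] + 1, y_step):
        for x in range(x_start, x_stop - xy_window[0] + 1, x_step):
            window_list.append(((x, y),
                                (x + xy_window[0], y + xy_window[1])))
    return window_list


def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
    # Draw each bounding box on a copy so the input image stays untouched
    img_copy = np.copy(img)
    for box in bboxes:
        cv2.rectangle(img_copy, box[0], box[1], color, thick)
    return img_copy
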
def perform_slide_window_search(img_dir,
                                img_files,
                                pickle_file,
                                out_dir_slide=None,
                                out_dir_heat=None,
                                out_dir_detect=None):
    print_section_header("Slide Window Vehicle Search")

    # Load trained classifier
    with open(pickle_file, 'rb') as in_file:
        class_pickle = pickle.load(in_file)

    # Determine list of files to process
    if len(img_files) == 0:
        img_files = sorted(os.listdir(img_dir))
        img_files = [f for f in img_files if not f.startswith('.')]

    for img_file in img_files:
        img_name = img_file.split('.')[0]
        img_path = img_dir + img_file
        img = mpimg.imread(img_path)
        find_vehicle_bounding_boxes(img, class_pickle, None, img_name,
                                    out_dir_slide, out_dir_heat,
                                    out_dir_detect)
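

# --- Usage sketch (an assumption, not part of the original listing) ---
# An illustrative driver chaining the stages above. All directory and pickle
# paths are hypothetical; find_vehicle_bounding_boxes() is defined elsewhere
# in the project. The pickle name follows the suffix scheme built in
# perform_classifier_training() with its default parameters.
def run_detection_pipeline():
    perform_distortion_correction('camera_cal/calibration.p', 'test_images/',
                                  [], 'output_images/undistorted/')
    perform_classifier_training('data/vehicles/', 'data/non-vehicles/',
                                results_dir='results/', pickle_result=True)
    perform_slide_window_search(
        'test_images/', [],
        'results/classifier_YCrCb_sp32_hist32_hog_9_8_2_ALL.p',
        out_dir_detect='output_images/detected/')
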
def demonstrate_feature_extraction(vehicle_img_dir,
                                   non_vehicle_img_dir,
                                   out_dir,
                                   cspace='YCrCb',
                                   spatial_size=(32, 32),
                                   hist_bins=32,
                                   orient=9,
                                   pix_per_cell=8,
                                   cell_per_block=2,
                                   hog_channel='ALL'):

    print_section_header("Feature Extraction")
    print("* Color Space: {}".format(cspace))
    print("* Spatial Feature Size: {}".format(spatial_size))
    print("* Color Histogram Bins: {}".format(hist_bins))
    print("* HOG Features:")
    print("  - Orientations: {}".format(orient))
    print("  - Pixels Per Cell: {}".format(pix_per_cell))
    print("  - Cells Per Block: {}".format(cell_per_block))
    print("  - HOG channel(s): {}".format(hog_channel))

    vehicle_imgs = glob.glob(vehicle_img_dir + '**/*.png', recursive=True)
    non_vehicle_imgs = glob.glob(non_vehicle_img_dir + '**/*.png',
                                 recursive=True)
    n_vehicle = len(vehicle_imgs)
    n_non_vehicle = len(non_vehicle_imgs)
    i_vehicle = np.random.randint(n_vehicle)
    i_non_vehicle = np.random.randint(n_non_vehicle)
    print("* Number Of Vehicle Training Images: {}".format(n_vehicle))
    print("* Number Of Non-Vehicle Training Images: {}".format(n_non_vehicle))
    print("* Vehicle Image Sample: {} ({})".format(vehicle_imgs[i_vehicle],
                                                   i_vehicle))
    print("* Non-Vehicle Image Sample: {} ({})".format(
        non_vehicle_imgs[i_non_vehicle], i_non_vehicle))

    # Plot the selected training images (cv2.imread returns BGR)
    img_vehicle = cv2.imread(vehicle_imgs[i_vehicle])
    img_vehicle = cv2.cvtColor(img_vehicle, cv2.COLOR_BGR2RGB)
    img_non_vehicle = cv2.imread(non_vehicle_imgs[i_non_vehicle])
    img_non_vehicle = cv2.cvtColor(img_non_vehicle, cv2.COLOR_BGR2RGB)
    fig_sample, (sub1, sub2) = plt.subplots(1, 2, figsize=(10, 4))
    sub1.imshow(img_vehicle)
    sub1.set_title('Vehicle', fontsize=20)
    sub2.imshow(img_non_vehicle)
    sub2.set_title('Non-Vehicle', fontsize=20)
    fig_sample.tight_layout()
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.1)
    out_file = "{}sample_image_{}_{}.jpg".format(out_dir, i_vehicle,
                                                 i_non_vehicle)
    plt.savefig(out_file)
    plt.close()

    # Convert image color space
    color_converter = determine_color_converter('RGB', cspace)
    img_vehicle_conv = cv2.cvtColor(img_vehicle, color_converter)
    img_non_vehicle_conv = cv2.cvtColor(img_non_vehicle, color_converter)

    # Extract features from training data
    start_time = time.perf_counter()
    print('\nStart feature extraction on sample images (at {:.3f})'.format(
        start_time))
    fig_hog, sub_plts_hog = plt.subplots(3, 4, figsize=(20, 15))
    fig_hist, sub_plts_hist = plt.subplots(2, 1, figsize=(10, 10))
    i_img = 0
    for label, img in zip(('Vehicle', 'Non-Vehicle'),
                          (img_vehicle_conv, img_non_vehicle_conv)):
        # HOG features
        for channel in range(img.shape[2]):
            _, hog_image = get_hog_features(img[:, :, channel],
                                            orient,
                                            pix_per_cell,
                                            cell_per_block,
                                            vis=True,
                                            feature_vec=True)
            ax_img = sub_plts_hog[channel, 2 * i_img]
            ax_img.set_title("{} CH-{}".format(label, channel + 1),
                             fontsize=20)
            ax_img.imshow(img[:, :, channel], cmap='gray')
            ax_hog = sub_plts_hog[channel, 2 * i_img + 1]
            ax_hog.set_title("{} CH-{} HOG".format(label, channel + 1),
                             fontsize=20)
            ax_hog.imshow(hog_image, cmap='gray')

        # Color histogram
        hist_features = color_hist(img, nbins=hist_bins)
        sub_plts_hist[i_img].set_title("{} - Color Histogram".format(label),
                                       fontsize=20)
        sub_plts_hist[i_img].bar(range(len(hist_features)), hist_features)
        i_img += 1
    # Save hog images
    fig_hog.tight_layout()
    fig_hog.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
    out_file = "{}hog_feature_{}_{}.jpg".format(out_dir, i_vehicle,
                                                i_non_vehicle)
    fig_hog.savefig(out_file)
    # Save color histogram
    fig_hist.tight_layout()
    out_file = "{}color_hist_{}_{}.jpg".format(out_dir, i_vehicle,
                                               i_non_vehicle)
    fig_hist.savefig(out_file)
    plt.close('all')
    end_time = time.perf_counter()
    print('Completed feature extraction on sample images in {:.3f}s '
          '(at {:.3f})'.format(end_time - start_time, end_time))
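

# --- Helper sketches (assumptions, not part of the original listing) ---
# get_hog_features(), color_hist(), and determine_color_converter() are used
# above but defined elsewhere. Minimal sketches assuming the common
# skimage/NumPy/OpenCV-based implementations.
def get_hog_features(channel, orient, pix_per_cell, cell_per_block,
                     vis=False, feature_vec=True):
    from skimage.feature import hog

    # skimage's hog() returns (features, hog_image) when visualize=True,
    # matching the tuple unpacking used in demonstrate_feature_extraction()
    return hog(channel, orientations=orient,
               pixels_per_cell=(pix_per_cell, pix_per_cell),
               cells_per_block=(cell_per_block, cell_per_block),
               visualize=vis, feature_vector=feature_vec)


def color_hist(img, nbins=32):
    # Histogram each color channel and concatenate the bin counts
    hists = [np.histogram(img[:, :, ch], bins=nbins)[0] for ch in range(3)]
    return np.concatenate(hists)


def determine_color_converter(src_space, dst_space):
    # Map a color-space pair such as ('RGB', 'YCrCb') to the matching
    # OpenCV conversion flag, e.g. cv2.COLOR_RGB2YCrCb
    return getattr(cv2, 'COLOR_{}2{}'.format(src_space, dst_space))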