Example #1
def main():

    makeFolderIfMissing(config.REPOS_FOLDER)
    username_list = spreadsheet.getMyUsernames(config.USERNAME)  # TODO prompt on no usernames written
    if not username_list:
        print("No usernames to grade found. Run setup.py if you haven't yet. "
              "If you have, the grading list might not have been updated yet.")
        return

    githubcommands.setup_repos(username_list, config.REPO_URL_TEMPLATE,
                               config.REPOS_FOLDER, config.STUDENT_REPO_PATH,
                               config.DUE_DATE)
    deployScriptForUsernames(username_list)
    with open("usernames.txt", "w") as f:
        for name in username_list:
            f.write("{}\n".format(name))
    for name in username_list:
        assignment_path = config.join_path(
            config.STUDENT_REPO_PATH.format(name), config.ASSIGNMENT_NAME)
        try:
            for p in [f for f in os.listdir(assignment_path) if f.endswith(".py")]:
                pass
                #banish_god_forsaken_code(config.join_path(assignment_path, p))
        except Exception as e:
            print(e)
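
The grading examples in this listing lean on a few small helpers from config and setup that are not shown here. A minimal sketch of two of them, assuming they are thin wrappers around the standard library:

import os

def join_path(*parts):
    # assumed: config.join_path / cfg.join_path as a thin wrapper around os.path.join
    return os.path.join(*parts)

def makeFolderIfMissing(folder):
    # assumed: create the folder (and any parents) if it does not already exist
    os.makedirs(folder, exist_ok=True)
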
def project_video_process():
    frame = 0

    def process_image(image):
        """
        process image (identify the lane line), return the processed image
        :param image: input image
        :return: the processed image
        """
        nonlocal frame
        frame += 1
        text = 'frame: %d' % frame
        detected_img = car_multiple_detections(image, False)
        cv2.putText(detected_img, text, (10, detected_img.shape[0] - 40),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        return detected_img

    from moviepy.editor import VideoFileClip

    video_name = 'project_video.mp4'
    # video_name = 'challenge_video.mp4'
    # video_name = 'harder_challenge_video.mp4'

    name, ext = video_name.split('.')
    input_path = cfg.join_path(cfg.video_path['videos'], video_name)
    output_path = cfg.join_path(cfg.video_path['output_videos'],
                                name + '_vehicle_detected.' + ext)

    # To speed up testing, only process a short subclip (e.g. seconds 20-22)
    # clip1 = VideoFileClip(input_path).subclip(20, 22)
    clip1 = VideoFileClip(input_path)
    white_clip = clip1.fl_image(
        process_image)  # NOTE: this function expects color images!!
    white_clip.write_videofile(output_path, audio=False)
Example #3
def test_locate_lane_lines():
    image_name = 'test6'
    threshold_image_name = image_name + '_threshold'

    input_path = cfg.join_path(cfg.line_finder['output'], threshold_image_name + '.jpg')
    input_img = cv2.imread(input_path, cv2.IMREAD_GRAYSCALE)

    transformer = TransformPerspective()

    input_img = transformer.transform(input_img)

    locator = Locator(input_img)
    left_located_line, right_located_line = locator.sliding_window()
    left_fit = left_located_line.fit_coefficients
    right_fit = right_located_line.fit_coefficients
    out_img = locator.visualize()
    plt.imshow(out_img)
    output_path = cfg.join_path(cfg.line_finder['output'], threshold_image_name + '_line1.jpg')
    plt.savefig(output_path)

    locatorWithPrior = LocatorWithPrior(input_img, left_fit, right_fit)
    left_located_line, right_located_line = locatorWithPrior.sliding_window()
    left_fit = left_located_line.fit_coefficients
    right_fit = right_located_line.fit_coefficients
    out_img = locatorWithPrior.visualize()
    plt.imshow(out_img)
    output_path = cfg.join_path(cfg.line_finder['output'], threshold_image_name + '_line2.jpg')
    plt.savefig(output_path)

    # Create an image to draw the lines on
    warp_zero = np.zeros_like(input_img).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

    # Recast the x and y points into usable format for cv2.fillPoly()
    ploty, left_fitx, right_fitx = Locator.pixels_on_fit(input_img.shape[0], left_fit, right_fit)

    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))

    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = transformer.inverse_transform(color_warp)

    # Combine the result with the original image
    origin_path = cfg.join_path(cfg.line_finder['input'], image_name + '.jpg')
    origin_img = cv2.imread(origin_path)
    origin_img = cv2.cvtColor(origin_img, cv2.COLOR_BGR2RGB)

    undistort = Undistort()
    undist = undistort.undistort_image(origin_img)
    output_path = cfg.join_path(cfg.line_finder['output'], image_name + '_line.jpg')
    result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)
    plt.imshow(result)
    plt.savefig(output_path)
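
Locator.pixels_on_fit is used above to turn the fitted coefficients back into pixel coordinates. A minimal sketch of it, assuming the coefficients come from a second-order polynomial fit in y (x = A*y^2 + B*y + C):

import numpy as np

def pixels_on_fit(height, left_fit, right_fit):
    # assumed helper: evaluate both lane-line polynomials for every image row
    ploty = np.linspace(0, height - 1, height)
    left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
    right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
    return ploty, left_fitx, right_fitx
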
def test():
    image_name = 'test3'
    image_path = cfg.join_path(cfg.line_finder['input'], image_name + '.jpg')
    img = mpimg.imread(image_path)

    # frame_name = 'project_video_frame500'
    # frame_path = cfg.join_path(cfg.video_path['frames'], 'project_video', frame_name + '.jpg')
    # img = mpimg.imread(frame_path)
    save_path = cfg.join_path(cfg.vehicle_detection['output'],
                              image_name + '_car_finder.jpg')
    car_multiple_detections(img, True, save_path)
def pipeline_prototype():
    panel_scale = 0.33
    image_name = 'test6'
    image_path = cfg.join_path(cfg.line_finder['input'], image_name + '.jpg')
    img = cv2.imread(image_path)

    undistort = Undistort()
    undist = undistort.undistort_image(img)
    combined_binary, combined_binary_masked = threshold_pipeline(undist, False)

    input_img = combined_binary

    transformer = TransformPerspective()
    input_img = transformer.transform(input_img)

    locator = Locator(input_img)
    left_located_line, right_located_line = locator.sliding_window()
    left_fit = left_located_line.fit_coefficients
    right_fit = right_located_line.fit_coefficients
    out_img_1 = cv2.resize(locator.visualize(), (0, 0), fx=panel_scale, fy=panel_scale)

    locatorWithPrior = LocatorWithPrior(input_img, left_fit, right_fit)
    left_located_line, right_located_line = locatorWithPrior.sliding_window()
    left_fit = left_located_line.fit_coefficients
    right_fit = right_located_line.fit_coefficients
    out_img_2 = cv2.resize(locatorWithPrior.visualize(), (0, 0), fx=panel_scale, fy=panel_scale)

    # Create an image to draw the lines on
    warp_zero = np.zeros_like(input_img).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

    # Recast the x and y points into usable format for cv2.fillPoly()
    ploty, left_fitx, right_fitx = Locator.pixels_on_fit(input_img.shape[0], left_fit, right_fit)

    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))

    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = transformer.inverse_transform(color_warp)

    undist = cv2.cvtColor(undist, cv2.COLOR_BGR2RGB)
    result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)

    result = cv2_overlay(result, out_img_1, (5, 5))
    result = cv2_overlay(result, out_img_2, (5 + out_img_1.shape[1], 5))

    plt.imshow(result)
    output_path = cfg.join_path(cfg.line_finder['output'], image_name + '_line.jpg')
    plt.savefig(output_path)
    plt.show()
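
cv2_overlay pastes the scaled diagnostic panels onto the final frame. A minimal sketch, assuming it simply copies the overlay into the background at the given (x, y) offset:

def cv2_overlay(background, overlay, position):
    # assumed helper: place `overlay` with its top-left corner at (x, y) on `background`
    x, y = position
    h, w = overlay.shape[:2]
    background[y:y + h, x:x + w] = overlay
    return background
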
def main():
    import config as cfg
    import glob
    import matplotlib.image as mpimg
    import matplotlib.pyplot as plt

    vehicle_path = cfg.vehicle_detection['vehicles']
    cars = glob.glob(cfg.join_path(vehicle_path, '**/*.png'))
    non_vehicle_path = cfg.vehicle_detection['non-vehicles']
    notcars = glob.glob(cfg.join_path(non_vehicle_path, '**/*.png'))

    car_image = mpimg.imread(cars[np.random.randint(0, len(cars))])
    notcar_image = mpimg.imread(notcars[np.random.randint(0, len(notcars))])

    orient = cfg.vehicle_detection['orient']
    pix_per_cell = cfg.vehicle_detection['pix_per_cell']
    cell_per_block = cfg.vehicle_detection['cell_per_block']

    features, car_hog = get_hog_features(car_image[:, :, 2],
                                         orient,
                                         pix_per_cell,
                                         cell_per_block,
                                         vis=True,
                                         feature_vec=True)
    features, notcar_hog = get_hog_features(notcar_image[:, :, 2],
                                            orient,
                                            pix_per_cell,
                                            cell_per_block,
                                            vis=True,
                                            feature_vec=True)

    fig = plt.figure(figsize=(12, 10))
    plt.subplot(221)
    plt.imshow(car_image)
    plt.title('Car', fontsize=18)
    plt.subplot(222)
    plt.imshow(notcar_image)
    plt.title('Not Car', fontsize=18)
    plt.subplot(223)
    plt.imshow(car_hog, cmap='gray')
    plt.title('Car HOG', fontsize=18)
    plt.subplot(224)
    plt.imshow(notcar_hog, cmap='gray')
    plt.title('Not Car HOG', fontsize=18)

    fig.tight_layout()
    # plt.show()
    plt.savefig(cfg.join_path(cfg.vehicle_detection['output'], 'hog.jpg'))
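
get_hog_features is presumably a wrapper around skimage.feature.hog; a minimal sketch under that assumption, with parameter names matching the call above:

from skimage.feature import hog

def get_hog_features(img, orient, pix_per_cell, cell_per_block,
                     vis=False, feature_vec=True):
    # assumed wrapper: returns (features, hog_image) when vis=True, otherwise just the features
    return hog(img,
               orientations=orient,
               pixels_per_cell=(pix_per_cell, pix_per_cell),
               cells_per_block=(cell_per_block, cell_per_block),
               visualize=vis,
               feature_vector=feature_vec)
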
Example #7
def findMisspelledAssignmentFolder(username,
                                   repo_path=config.STUDENT_REPO_PATH,
                                   regex=config.typoRE):
    for folder in os.listdir(config.join_path(repo_path.format(username))):
        if re.search(regex, folder):
            return folder
    return ""
Example #8
def main():

    input_path = cfg.camera_calibration['input']

    if input_path is not None:
        image_paths = glob.glob(cfg.join_path(input_path, 'calibration*.jpg'))

        obj_points, img_points = find_corners(image_paths, cfg.camera_calibration['output'],
                                              cfg.camera_calibration['grid_rows'],
                                              cfg.camera_calibration['grid_columns'])

        image_name = 'calibration8'
        image_path = cfg.join_path(input_path, image_name + '.jpg')
        img = cv2.imread(image_path)
        img_size = (img.shape[1], img.shape[0])

        camera_calibration(obj_points, img_points, img_size, cfg.camera_calibration['camera_calibration_file'])
Example #9
def test():
    """
    test the workflow using a test image
    :return:
    """
    image_name = 'test6'
    image_path = cfg.join_path(cfg.line_finder['input'], image_name + '.jpg')
    image = cv2.imread(image_path)
    lane = Lane(threshold_pipeline)
    lane.find_line(image)
    result = lane.visualize()
    result = cv2.cvtColor(result, cv2.COLOR_BGR2RGB)

    plt.imshow(result)
    output_path = cfg.join_path(cfg.line_finder['output'],
                                image_name + '_line.jpg')
    plt.savefig(output_path)
    plt.show()
Example #10
def main():
    perspective_matrix_path = cfg.line_finder['perspective_matrix_file']

    height = 720
    src = np.float32([[195, height], [593, 450], [689, 450], [1125, height]])
    dst = np.float32([[315, height], [315, 0], [965, 0], [965, height]])
    M, Minv = generate_matrix(src, dst)
    dict_pickle = {'M': M, 'Minv': Minv}
    with open(perspective_matrix_path, "wb") as f:
        pickle.dump(dict_pickle, f)

    transformer = TransformPerspective()

    image_name = 'straight_lines1'

    image_path = cfg.join_path(cfg.line_finder['input'], image_name + '.jpg')
    warped_img_path = cfg.join_path(cfg.line_finder['output'],
                                    image_name + '_warped.jpg')
    warped_unwarped_path = cfg.join_path(cfg.line_finder['output'],
                                         image_name + '_warped_unwarped.jpg')

    img = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
    undistort = Undistort()
    img = undistort.undistort_image(img)

    warped_img = transformer.transform(img)

    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
    f.tight_layout()
    ax1.imshow(img)
    ax1.plot(src[:, 0], src[:, 1], '-r')
    ax1.set_title('Original Image', fontsize=50)

    ax2.imshow(warped_img)
    ax2.plot(dst[:, 0], dst[:, 1], '-r')
    ax2.set_title('Warped Image', fontsize=50)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
    plt.savefig(warped_img_path)

    warped_unwarped_img = transformer.inverse_transform(warped_img)
    warped_unwarped_img = cv2.cvtColor(warped_unwarped_img, cv2.COLOR_RGB2BGR)  # back to BGR for cv2.imwrite
    cv2.imwrite(warped_unwarped_path, warped_unwarped_img)
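
generate_matrix is not shown in this listing. A minimal sketch, assuming it wraps cv2.getPerspectiveTransform in both directions:

import cv2

def generate_matrix(src, dst):
    # assumed helper: forward and inverse perspective-transform matrices
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    return M, Minv
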
Example #11
def test_undistort():
    undistort = Undistort()

    image_name = 'calibration1'
    image_path = cfg.join_path(cfg.camera_calibration['input'],
                               image_name + '.jpg')

    img = cv2.imread(image_path)

    undistorted_img_save_path = cfg.join_path(cfg.camera_calibration['output'],
                                              image_name + '_undistorted.jpg')

    contrast_images_save_path = cfg.join_path(cfg.camera_calibration['output'],
                                              image_name + '_contrast.jpg')

    undistorted_img = undistort.undistort_image(img)
    cv2.imwrite(undistorted_img_save_path, undistorted_img)

    undistort.plot_contrast(img, undistorted_img, contrast_images_save_path)
Example #12
def writeTestCodeToFolder(username,
                          repoPath=config.STUDENT_REPO_PATH,
                          folder=config.ASSIGNMENT_NAME,
                          path_to_template=config.TEMPLATE_PATH):
    studentPath = config.join_path(repoPath.format(username), folder,
                                   "gradingTests.py")
    graderPath = os.path.abspath(os.path.dirname(
        __file__))  # path to the grader dir, not where the script was run

    with open(studentPath, "w+") as tests_file:
        #add the current folder temporarily to python path so stuff here can be imported elsewhere
        tests_file.write(
            """import sys\nsys.path.insert(0, r"{}")\n""".format(graderPath))
        #add a reference to the template folder so the test cases can be loaded
        tests_file.write("""TEMPLATE_FOLDER=r"{}"\n""".format(
            config.join_path(graderPath, config.TEMPLATE_FOLDER)))
        tests_file.write("""username='******'\n""".format(username))
        with open(path_to_template, "r") as template:
            for line in template:
                tests_file.write(line.replace("\\n", os.linesep))
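
For a hypothetical student username such as some_student, the generated gradingTests.py therefore starts with a header along these lines before the template body follows (paths are illustrative):

import sys
sys.path.insert(0, r"/path/to/grader")          # so the generated tests can import grader modules
TEMPLATE_FOLDER=r"/path/to/grader/templates"    # where the test cases are loaded from
username='some_student'
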
Example #13
def main():

    setup.makeFolderIfMissing(config.REPOS_FOLDER)
    username_list = spreadsheet.getAllUsernames()  # TODO prompt on no usernames written
    #githubcommands.setup_repos(username_list, config.REPO_URL_TEMPLATE, config.REPOS_FOLDER, config.STUDENT_REPO_PATH, config.DUE_DATE)

    with open("results.csv", "w") as res:
        writer = csv.writer(res, delimiter=",")
        writer.writerow([
            "username", "gitignore", "noCodeFolder", "assignment0",
            "helloworld", "syntax"
        ])
        for name in username_list:
            base_path = config.join_path(config.STUDENT_REPO_PATH.format(name))
            a0path = config.join_path(base_path, "Assignment0")
            helloWorldRuns = False

            gitignoreExists = os.path.isfile(
                config.join_path(base_path, ".gitignore"))
            noCode = not os.path.isdir(config.join_path(base_path, ".vscode"))
            ass0Exists = os.path.isdir(a0path)
            helloWorldExists = os.path.isfile(
                config.join_path(a0path, "helloworld.py"))
            if helloWorldExists:
                try:
                    proc = subprocess.Popen("python3 helloworld.py",
                                            cwd=a0path,
                                            shell=True,
                                            stdout=subprocess.DEVNULL)
                    helloWorldRuns = proc.wait() == 0
                except Exception:
                    pass

            writer.writerow([
                name, gitignoreExists, noCode, ass0Exists, helloWorldExists,
                helloWorldRuns
            ])
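
The Popen/wait pattern above can hang if a student's helloworld.py blocks on input. A hedged alternative sketch using subprocess.run with a timeout (the helper name and 10-second limit are arbitrary choices, not part of the original script):

import subprocess

def hello_world_runs(a0path, timeout=10):
    # hypothetical helper: True only if the script exits cleanly within the timeout
    try:
        completed = subprocess.run(["python3", "helloworld.py"],
                                   cwd=a0path,
                                   stdout=subprocess.DEVNULL,
                                   stderr=subprocess.DEVNULL,
                                   timeout=timeout)
        return completed.returncode == 0
    except (subprocess.TimeoutExpired, OSError):
        return False
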
def main():
    image_name = 'calibration8'
    img_path = cfg.join_path(cfg.camera_calibration['output'],
                             image_name + '_undistorted.jpg')
    warped_img_path = cfg.join_path(cfg.camera_calibration['output'],
                                    image_name + '_warped.jpg')

    grid_rows = cfg.camera_calibration['grid_rows']
    grid_columns = cfg.camera_calibration['grid_columns']
    offset = 0

    img = cv2.imread(img_path)
    warped_img, perspective_M = unwarp_chessboard(img, grid_rows, grid_columns,
                                                  offset)

    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
    f.tight_layout()
    ax1.imshow(img)
    ax1.set_title('Original Image', fontsize=50)
    ax2.imshow(warped_img)
    ax2.set_title('Warped Image', fontsize=50)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
    plt.savefig(warped_img_path)
Example #15
def find_corners(image_paths, image_output_folder, row_number, col_number,
                 obj_points_filename=None, img_points_filename=None):
    """
    Find chessboard corners in the images at the given paths and save copies with the corners drawn
    into the given output folder; optionally save the object points and image points to .npy files.
    :param image_paths: paths of images containing chess board
    :param image_output_folder: folder to save images with corner drawn
    :param row_number: number of rows of inner corners on the chess board
    :param col_number: number of columns of inner corners on the chess board
    :param obj_points_filename: filename to save the object points
    :param img_points_filename: filename to save the image points
    :return: object points and image points
    """

    # Arrays to store object points and image points from ALL the images.
    obj_points = []  # 3d points in real world space
    img_points = []  # 2d points in image plane.

    # prepare object points for a SINGLE image, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    obj_point = np.zeros((row_number * col_number, 3), np.float32)
    obj_point[:, :2] = np.mgrid[0:col_number, 0:row_number].T.reshape(-1, 2)

    # Step through the list of calibration images and search for chessboard corners
    for idx, file_name in enumerate(image_paths):

        if file_name.lower().endswith('.jpg'):
            img = cv2.imread(file_name)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            # Find the chessboard corners
            ret, corners = cv2.findChessboardCorners(gray, (col_number, row_number), None)

            # If found, add object points, image points
            if ret:
                obj_points.append(obj_point)
                img_points.append(corners)

                # Draw and display the corners
                cv2.drawChessboardCorners(img, (col_number, row_number), corners, ret)
                write_name = cfg.join_path(image_output_folder, file_name.split('/')[-1])
                cv2.imwrite(write_name, img)
                # cv2.imshow('img', img)
                # cv2.waitKey(500)

    # cv2.destroyAllWindows()
    if obj_points_filename is not None and img_points_filename is not None:
        np.save(obj_points_filename, obj_points)
        np.save(img_points_filename, img_points)
    return obj_points, img_points
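
camera_calibration (called in the earlier calibration example) is not shown in this listing. A minimal sketch, assuming it wraps cv2.calibrateCamera and pickles the camera matrix and distortion coefficients for later use by Undistort:

import pickle
import cv2

def camera_calibration(obj_points, img_points, img_size, calibration_file):
    # assumed counterpart to find_corners: compute and persist mtx/dist
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        obj_points, img_points, img_size, None, None)
    with open(calibration_file, "wb") as f:
        pickle.dump({'mtx': mtx, 'dist': dist}, f)
    return mtx, dist
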
Example #16
def test_threshold_pipeline():
    image_name = 'test5'

    image_path = cfg.join_path(cfg.line_finder['input'], image_name + '.jpg')
    img = cv2.imread(image_path)

    undistort = Undistort()
    img = undistort.undistort_image(img)
    undist_path = cfg.join_path(cfg.line_finder['output'],
                                image_name + '_undist.jpg')
    cv2.imwrite(undist_path, img)

    images = threshold_pipeline(img)
    images.plot()

    thresholded_path = cfg.join_path(cfg.line_finder['output'],
                                     image_name + '_threshold.jpg')
    thresholded_mask_path = cfg.join_path(cfg.line_finder['output'],
                                          image_name + '_threshold_masked.jpg')
    img_line_path = cfg.join_path(cfg.line_finder['output'],
                                  image_name + '_threshold_lane.jpg')

    thresholded_binary = images.get('thresholded')
    thresholded_masked = images.get('thresholded_masked')

    line = np.dstack(
        (thresholded_masked * 255, np.zeros_like(thresholded_masked),
         np.zeros_like(thresholded_masked)))

    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img_line = cv2.addWeighted(img, 1, line, 1, 0)
    img_line = cv2.cvtColor(img_line, cv2.COLOR_RGB2BGR)  # back to BGR for cv2.imwrite

    cv2.imwrite(thresholded_path, thresholded_binary * 255)
    cv2.imwrite(thresholded_mask_path, thresholded_masked * 255)
    cv2.imwrite(img_line_path, img_line)
        # Append the new feature vector to the features list
        features.append(hog_features)
    # Return list of feature vectors
    return features


test_size = 0.2

vehicle_path = cfg.vehicle_detection['vehicles']

cars_GTI_folders = ['GTI_Far', 'GTI_Left', 'GTI_MiddleClose', 'GTI_Right']

cars_GTI_trains = []
cars_GTI_tests = []
for cars_GTI_folder in cars_GTI_folders:
    cars_GTI_paths = glob.glob(cfg.join_path(vehicle_path, cars_GTI_folder + '/*.png'))
    split = int(len(cars_GTI_paths) * test_size)
    cars_GTI_tests.extend(cars_GTI_paths[0:split])
    cars_GTI_trains.extend(cars_GTI_paths[split:])

cars_KITTI = glob.glob(cfg.join_path(vehicle_path, 'KITTI_extracted/*.png'))
non_vehicle_path = cfg.vehicle_detection['non-vehicles']
notcars = glob.glob(cfg.join_path(non_vehicle_path, '**/*.png'))

t = time.time()
cars_GTI_train_features = extract_features(cars_GTI_trains)
cars_GTI_test_features = extract_features(cars_GTI_tests)

car_KITTI_features = extract_features(cars_KITTI)
notcar_features = extract_features(notcars)
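
extract_features itself is mostly missing from this listing (only its closing lines survive further up). A minimal sketch consistent with that tail, assuming it builds one HOG feature vector per image from the parameters in cfg:

import matplotlib.image as mpimg
import numpy as np

def extract_features(image_paths):
    # hedged sketch: read each image and concatenate per-channel HOG features
    orient = cfg.vehicle_detection['orient']
    pix_per_cell = cfg.vehicle_detection['pix_per_cell']
    cell_per_block = cfg.vehicle_detection['cell_per_block']
    features = []
    for path in image_paths:
        image = mpimg.imread(path)
        hog_features = []
        for channel in range(image.shape[2]):
            hog_features.extend(
                get_hog_features(image[:, :, channel], orient, pix_per_cell,
                                 cell_per_block, vis=False, feature_vec=True))
        # Append the new feature vector to the features list
        features.append(np.array(hog_features))
    # Return list of feature vectors
    return features
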
Example #18
    lane = Lane(threshold_pipeline)

    # lane = Lane(challenge_threshold_pipeline)


    def process_image(image):
        """
        process image (identify the lane line), return the processed image
        :param image: input image
        :return: the processed image
        """
        lane.find_line(image)
        return lane.visualize()

    from moviepy.editor import VideoFileClip

    video_name = 'project_video.mp4'
    # video_name = 'challenge_video.mp4'
    # video_name = 'harder_challenge_video.mp4'

    input_path = cfg.join_path(cfg.video_path['videos'], video_name)
    output_path = cfg.join_path(cfg.video_path['output_videos'], video_name)

    # To speed up the testing process, only process a subclip of the first 5 seconds
    # clip1 = VideoFileClip(input_path).subclip(0, 5)
    clip1 = VideoFileClip(input_path)
    white_clip = clip1.fl_image(
        process_image)  # NOTE: this function expects color images!!
    white_clip.write_videofile(output_path, audio=False)