def find_perspective_transform(self):
        image = undistort_image(self.load_image(), self._mtx, self._dist)

        Pipeline.save_image('{}_undistorted.jpg'.format(self._filename_no_ext), image)

        imshape = image.shape

        src_points = [(520, 504), (769, 504), (1100, imshape[0]), (217, imshape[0])]
        src = np.array([src_points], dtype=np.float32)
        dst = np.array([(350, 0), (950, 0), (950, imshape[0]), (350, imshape[0])], dtype=np.float32)

        M = cv2.getPerspectiveTransform(src, dst)
        M_inv = cv2.getPerspectiveTransform(dst, src)

        logging.info('Writing perspective matrices...')
        with open('{}.pkl'.format(PERSPECTIVE_FILE), 'wb') as f:
            pickle.dump([M, M_inv], f)

        # im2 = cv2.polylines(image, src.astype(np.int32), 1, (0, 255, 0), thickness=2)

        warped = cv2.warpPerspective(image, M, (imshape[1], imshape[0]), flags=cv2.INTER_LINEAR)

        Pipeline.save_image('warped_straight.jpg', warped)

        if self._verbose:
            # warped = cv2.polylines(warped, dst.astype(np.int32)[:, np.newaxis], 1, (0, 255, 0), thickness=2)

            plt.imshow(warped)
            plt.waitforbuttonpress()

        logging.info('Perspective matrices computed and stored')

    def process_image(self, image):
        self._frame += 1

        M, M_inv = self._M, self._M_inv

        image_undist = undistort_image(image, self._mtx, self._dist)

        if self._verbose:
            Pipeline.save_image('{}_undist.jpg'.format(self._filename_no_ext), image_undist)

        imshape = image.shape

        logging.info('Creating binary threshold...')
        binary = self.apply_thresholing(image_undist)

        if self._verbose:
            Pipeline.save_image('{}_binary.jpg'.format(self._filename_no_ext), binary, cmap='gray')

        if self._show:
            show_image_gray(binary)
            plt.waitforbuttonpress()

        logging.info('Warping the image')
        warped = cv2.warpPerspective(image_undist, M, (imshape[1], imshape[0]), flags=cv2.INTER_LINEAR)

        if self._verbose:
            logging.info('Warped image')
            Pipeline.save_image('{}_warp.jpg'.format(self._filename_no_ext), warped)

        binary_warped = cv2.warpPerspective(binary, M, (imshape[1], imshape[0]), flags=cv2.INTER_LINEAR)
        if self._verbose:
            Pipeline.save_image('{}_binary_warp.jpg'.format(self._filename_no_ext), binary_warped, cmap='gray')

        result, fit_image, curvatures, offset = self._lane_fitter.fit_transform(binary_warped, image_undist)

        if self._verbose:
            Pipeline.save_image('{}_binary_warp_fit.jpg'.format(self._filename_no_ext), fit_image)

        p = self._parameters
        if self._frame - p.frame >= 5 or p.frame == 0:
            self._parameters = Parameters(self._frame, curvatures[0], curvatures[1], offset)
            p = self._parameters

        if self._add_overlay:
            result = Pipeline.add_overlay(result, p, binary, warped, fit_image)

        logging.info('Creating output image')

        if self._verbose:
            Pipeline.save_image("{}.jpg".format(self._filename_no_ext), result, out_dir=OUT_DIR)

        if self._show:
            plt.imshow(result)
            plt.waitforbuttonpress()

        return result
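Every example on this page relies on an undistort_image helper whose definition is not shown and which clearly varies between repositories (some variants take only the image, others take rectification matrices R and P). A minimal sketch of an OpenCV-based version covering the call styles seen here, offered as an assumption rather than any repository's actual code:

import cv2
import numpy as np


def undistort_image(img, camera_matrix, dist_coefs, R=None, P=None,
                    interpolation=cv2.INTER_LINEAR):
    """Sketch: undistort (and optionally rectify) a single image."""
    if R is None and P is None:
        # Plain case: remove lens distortion only.
        return cv2.undistort(img, camera_matrix, dist_coefs)
    # Rectified case: build a remap from the rectification rotation R and
    # the new projection matrix P, then resample the image.
    h, w = img.shape[:2]
    rot = R if R is not None else np.eye(3)
    new_k = P if P is not None else camera_matrix
    map1, map2 = cv2.initUndistortRectifyMap(camera_matrix, dist_coefs, rot,
                                             new_k, (w, h), cv2.CV_32FC1)
    return cv2.remap(img, map1, map2, interpolation)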
Example No. 3
    def __getitem__(self, idx):
        sample = self.samples[idx]
        s = self.scale

        # load and undistort target image
        tgt_img = load_as_float(sample['tgt_img'])
        tgt_img = undistort_image(tgt_img)
        h, w = tgt_img.shape  # single-channel image, so shape is (h, w)
        # downsample and normalize target image resolution
        tgt_img = np.expand_dims(cv2.resize(tgt_img,
                                            (w // s, h // s)), 0) / 255.0

        # load and undistort reference images
        ref_img = sample['ref_img']
        ref_img = load_as_float(ref_img)
        ref_img = undistort_image(ref_img)
        # downsample and normalize reference image resolution
        ref_img = np.expand_dims(cv2.resize(ref_img,
                                            (w // s, h // s)), 0) / 255.0

        return tgt_img, ref_img, sample['tgt_to_ref']
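The class wrapping this __getitem__ is not shown; assuming it is a torch.utils.data.Dataset subclass (the name PairDataset and its constructor arguments below are hypothetical), it would typically be consumed like this:

from torch.utils.data import DataLoader

dataset = PairDataset(samples, scale=2)   # hypothetical name and signature
loader = DataLoader(dataset, batch_size=4, shuffle=True)

for tgt_img, ref_img, tgt_to_ref in loader:
    # Each image batch arrives as a (batch, 1, h // s, w // s) tensor
    # scaled to [0, 1]; cast to float32 before feeding a network.
    tgt_img = tgt_img.float()
    ref_img = ref_img.float()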
Example No. 4
def vehicles_pipeline(img,
                      history,
                      camera_mtx,
                      dist_params,
                      scaler,
                      model,
                      video=False):

    print('Step 1: Undistortion of image')
    image = utils.undistort_image(img, camera_mtx, dist_params)

    print('Step 2: Find cars')
    detections = find_cars(image, scaler, model)

    print('Step 3: Generate heat map & draw cars')
    [output, history, heatmap] = generate_heatmap(image, detections, history)

    return output, history
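generate_heatmap is defined elsewhere; a minimal sketch of the usual heat-map step (accumulate detection boxes, smooth over recent frames, threshold, and label connected blobs), assuming detections are ((x1, y1), (x2, y2)) corner pairs and history is a list of previous heat maps — not necessarily the exact implementation used here:

import numpy as np
import cv2
from scipy.ndimage import label


def generate_heatmap(image, detections, history, threshold=2):
    """Sketch: turn overlapping detection boxes into labelled car regions."""
    heatmap = np.zeros(image.shape[:2], dtype=np.float32)
    for (x1, y1), (x2, y2) in detections:        # assumed box format
        heatmap[y1:y2, x1:x2] += 1.0             # add heat inside each box
    history.append(heatmap)                      # assumed: list of heat maps
    summed = np.sum(history[-10:], axis=0)       # smooth over recent frames
    summed[summed <= threshold] = 0              # reject weak detections
    labels, n_cars = label(summed)
    output = image.copy()
    for car in range(1, n_cars + 1):
        ys, xs = np.nonzero(labels == car)
        cv2.rectangle(output, (int(xs.min()), int(ys.min())),
                      (int(xs.max()), int(ys.max())), (0, 0, 255), 4)
    return output, history, summed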
Example No. 5
def pipeline(img, history, camera_mtx, dist_params, video=False):

    print('Step 1: Undistortion of image')
    undistort = utils.undistort_image(img, camera_mtx, dist_params)

    print('Step 2: Perform perspective transform')
    topview = utils.birdseye(undistort)

    print('Step 3: Apply image masks')
    masked_topview = utils.apply_masks(topview)

    print('Step 4: Find lanes')
    [left_lane, right_lane,
     history] = utils.find_lanes(masked_topview, history, video)

    print('Step 5: Draw lanes & transform back')
    output = utils.plot_lanes(undistort, left_lane, right_lane)

    return output, history
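A hedged example of driving this pipeline frame by frame. The camera matrix and distortion coefficients are assumed to come from a pickle with 'mtx' and 'dist' keys like the one written by the calibration routine below, the video filename is a placeholder, and an empty list is assumed to be an acceptable initial history:

import pickle
import cv2

with open('camera.pkl', 'rb') as f:
    calib = pickle.load(f)

cap = cv2.VideoCapture('project_video.mp4')   # placeholder filename
history = []                                  # assumed initial history
while True:
    ok, frame = cap.read()
    if not ok:
        break
    annotated, history = pipeline(frame, history, calib['mtx'],
                                  calib['dist'], video=True)
    cv2.imshow('lanes', annotated)
    if cv2.waitKey(1) == 27:                  # Esc to quit
        break

cap.release()
cv2.destroyAllWindows()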
def main():
    images = glob.glob('camera_cal/*.jpg')

    objp = np.zeros((9 * 6, 3), np.float32)
    objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)

    obj_points = []
    img_points = []

    os.makedirs('images/camera_cal', exist_ok=True)

    for idx, fname in enumerate(images):
        img = cv2.imread(fname)
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        ret, corners = cv2.findChessboardCorners(img_gray, (9, 6), None)

        if ret:
            obj_points.append(objp)
            img_points.append(corners)

            # Draw and display the corners
            cv2.drawChessboardCorners(img, (9, 6), corners, ret)
            write_name = 'images/camera_cal/corners_found' + str(idx) + '.jpg'
            cv2.imwrite(write_name, img)
            cv2.imshow('img', img)
            cv2.waitKey(500)

    cv2.destroyAllWindows()

    # Do camera calibration given the object points and image points.
    # calibrateCamera expects the image size as (width, height).
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        obj_points, img_points, img_gray.shape[::-1], None, None)

    print('Writing camera matrix and distortion coefficients...')
    with open('camera.pkl', 'wb') as f:
        pickle.dump({'mtx': mtx, 'dist': dist}, f)

    undistorted = undistort_image(cv2.imread('camera_cal/calibration1.jpg'),
                                  mtx, dist)
    cv2.imwrite("images/camera_cal/calibration1_undistorted.jpg", undistorted)
Example No. 7
            print('[%d/%d] Undistorting %s ...' %
                  (i + 1, len(image_list), fname))
            img_l = cv2.imread(os.path.join(left_dir, fname))
            img_r = cv2.imread(os.path.join(right_dir, fname))

            dst_l = cv2.undistort(img_l, camera_matrix_l, dist_coefs_l)
            dst_r = cv2.undistort(img_r, camera_matrix_r, dist_coefs_r)
            output = np.concatenate((dst_l, dst_r), axis=1)
            h, w = output.shape[:2]
            for y in range(h // 10, h, h // 10):
                cv2.line(output, (0, y), (w, y), (0, 0, 255))
            cv2.imwrite(os.path.join(debug_dir, 'undistorted_' + fname),
                        output)
            cv2.imshow('undistorted', output)

            dst_l = undistort_image(img_l, camera_matrix_l, dist_coefs_l, R1,
                                    P1)
            dst_r = undistort_image(img_r, camera_matrix_r, dist_coefs_r, R2,
                                    P2)
            output = np.concatenate((dst_l, dst_r), axis=1)
            h, w = output.shape[:2]
            for y in range(h // 10, h, h // 10):
                cv2.line(output, (0, y), (w, y), (0, 255, 0))

            cv2.imwrite(os.path.join(debug_dir, 'rectified_' + fname), output)
            cv2.imshow('rectified', output)

            cv2.waitKey(20)

        cv2.waitKey(0)
Example No. 8
    dist_coefs = fs.getNode('D').mat()
    if dist_coefs is None:
        dist_coefs = fs.getNode('D1').mat()
    if dist_coefs is None:
        dist_coefs = np.zeros(5)

    scale = height / calib_height
    camera_matrix = utils.scale_camera_matrix(camera_matrix, scale)

    if args.rectify:
        R = fs.getNode('R1').mat()
        P = fs.getNode('P1').mat()
    else:
        P = None
        R = None
    if P is None:
        P = camera_matrix
    else:
        P = utils.scale_camera_matrix(P, scale)

    dst = utils.undistort_image(img,
                                camera_matrix,
                                dist_coefs,
                                R,
                                P,
                                interpolation=cv2.INTER_NEAREST)
    cv2.imwrite(args.output, dst)
    # cv2.imshow('undistorted', dst)
    cv2.waitKey(0)
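utils.scale_camera_matrix is not shown; a minimal sketch of what such a helper usually does (rescale the intrinsics when the working resolution differs from the calibration resolution) — an assumption, not the repository's actual code:

import numpy as np


def scale_camera_matrix(mat, scale):
    """Sketch: rescale a 3x3 intrinsic matrix K or a 3x4 projection matrix P
    for an image resized by the given factor."""
    scaled = np.asarray(mat, dtype=np.float64).copy()
    scaled[:2] *= scale   # scales fx, fy, cx, cy (and fx*Tx for a 3x4 P)
    return scaled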
Example No. 9
    fs.write('image_size', image_size)
    fs.write('K', camera_matrix)
    fs.write('D', dist_coefs)
    fs.release()

    if args.debug:
        tot_err, errs = compute_reprojection_errors(obj_points, img_points,
                                                    rvecs, tvecs,
                                                    camera_matrix, dist_coefs)
        print('Avg Reprojection Error:', tot_err)
        print('Per View Reprojection Errors:')
        for i, err in enumerate(errs):
            print(i, selected_images[i], err)

    if args.debug:
        for i, fname in enumerate(image_list):
            print('[%d/%d] Undistorting %s ...' %
                  (i + 1, len(image_list), fname))
            img = cv2.imread(os.path.join(input_dir, fname))
            dst = undistort_image(img, camera_matrix, dist_coefs, None,
                                  newcameramtx)

            x, y, w, h = roi
            cv2.rectangle(dst, (x, y), (x + w - 1, y + h - 1), (0, 0, 255), 1)

            cv2.imwrite(os.path.join(debug_dir, 'undistort_' + fname), dst)
            cv2.imshow('rectified', dst)
            cv2.waitKey(20)

    cv2.waitKey(0)
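compute_reprojection_errors is also defined elsewhere; the standard way to obtain these numbers is to reproject each view's object points with the estimated pose and intrinsics and compare against the detected corners. A sketch under that assumption:

import cv2
import numpy as np


def compute_reprojection_errors(obj_points, img_points, rvecs, tvecs,
                                camera_matrix, dist_coefs):
    """Sketch: overall RMS reprojection error and mean error per view (pixels)."""
    per_view = []
    total_sq = 0.0
    total_pts = 0
    for objp, imgp, rvec, tvec in zip(obj_points, img_points, rvecs, tvecs):
        projected, _ = cv2.projectPoints(objp, rvec, tvec,
                                         camera_matrix, dist_coefs)
        diff = projected.reshape(-1, 2) - imgp.reshape(-1, 2)
        errs = np.sqrt((diff ** 2).sum(axis=1))   # per-corner pixel error
        per_view.append(errs.mean())
        total_sq += (errs ** 2).sum()
        total_pts += len(errs)
    return np.sqrt(total_sq / total_pts), per_view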
Example No. 10
import cv2
import pickle
from utils import detect_show_markers, undistort_image

if __name__ == '__main__':
    cap = cv2.VideoCapture(0)
    aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_6X6_250)
    parameters = cv2.aruco.DetectorParameters_create()
    # Load coefficients
    with open('cam_param.pkl', 'rb') as f:
        camera_param = pickle.load(f)
    camera_mtx, dist_coefficients, _, _, _, _ = camera_param

    while True:
        # Capture frame-by-frame
        _, img = cap.read()
        # Undistort the frame
        img = undistort_image(img, camera_mtx, dist_coefficients)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Show detected markers
        detect_show_markers(img, gray, aruco_dict, parameters, camera_mtx,
                            dist_coefficients)
        # Press Esc to close
        if cv2.waitKey(5) == 27:
            break

    cap.release()
    cv2.destroyAllWindows()
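detect_show_markers comes from the same utils module and is not shown; a sketch of the usual flow with the legacy cv2.aruco API this script already uses (the marker side length is an assumed value):

import cv2


def detect_show_markers(img, gray, aruco_dict, parameters, camera_mtx,
                        dist_coefs, marker_length=0.05):
    """Sketch: detect ArUco markers, draw them and their axes, show the frame."""
    corners, ids, _ = cv2.aruco.detectMarkers(gray, aruco_dict,
                                              parameters=parameters)
    if ids is not None:
        cv2.aruco.drawDetectedMarkers(img, corners, ids)
        # Pose estimation needs the physical marker side length (metres).
        rvecs, tvecs, _ = cv2.aruco.estimatePoseSingleMarkers(
            corners, marker_length, camera_mtx, dist_coefs)
        for rvec, tvec in zip(rvecs, tvecs):
            cv2.aruco.drawAxis(img, camera_mtx, dist_coefs, rvec, tvec,
                               marker_length * 0.5)
    cv2.imshow('markers', img)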
"""

import numpy as np
import cv2
from utils import read_pts, get_im_num, increment_im, get_next_image, undistort_pts, get_line_mask, undistort_image, read_transformations
import matplotlib.pyplot as plt
from skimage import transform
import matplotlib.cm as cm
from matplotlib.gridspec import GridSpec

image_path = './camera_calibration/test/dji_0691.jpg'

optical = cv2.imread(image_path)
optical_lines = get_line_mask(optical)
thermal = cv2.imread(get_next_image(image_path))
thermal_undistorted = undistort_image(thermal)

#transforms = read_transformations('./Newest_data/transformations_corrected_affine.txt')
#t_avg = np.median(transforms, axis=0)
#print(t_avg)
t_avg = np.load('./Newest_data/the_transform.npy')
print(t_avg)
thermal_undistorted_transformed = transform.warp(
    thermal_undistorted,
    transform.AffineTransform(t_avg),
    output_shape=optical.shape)

#fig = plt.figure(figsize=(7,14))
#ax = fig.add_subplot(111)
#ax.set_ylabel('o')
#ax.xaxis.label.set_color('red')
Example No. 12
    def undistort_stereo_images(self, img_l, img_r):
        img_l = utils.undistort_image(img_l, self.K1, self.D1, self.R1,
                                      self.P1)
        img_r = utils.undistort_image(img_r, self.K2, self.D2, self.R2,
                                      self.P2)
        return img_l, img_r
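The K1/D1/R1/P1 and K2/D2/R2/P2 attributes used here are typically produced by cv2.stereoRectify after stereo calibration; a sketch of how such a class might initialize them (the class name and constructor are assumptions):

import cv2


class StereoRectifier:
    """Hypothetical wrapper holding per-camera rectification parameters."""

    def __init__(self, K1, D1, K2, D2, R, T, image_size):
        # R, T: rotation and translation from camera 1 to camera 2,
        # e.g. as returned by cv2.stereoCalibrate.
        self.K1, self.D1 = K1, D1
        self.K2, self.D2 = K2, D2
        (self.R1, self.R2, self.P1, self.P2,
         self.Q, self.roi1, self.roi2) = cv2.stereoRectify(
            K1, D1, K2, D2, image_size, R, T, alpha=0)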