Example #1
    def correctSkew(self, original_img):
        '''
        Applies the stored undistortion (skew) matrices to a given photo
            - Only creates dst if use_skew_correction is True or None
                - None implies this is being used for calibration purposes
                - If the user has turned skew correction off, the original image is returned unchanged

        Args:
            original_img (numpy.ndarray): photo which needs skew correction

        Returns:
            If using skew correction:
                corrected_img (numpy.ndarray): photo corrected for skew
            Else:
                original_img (numpy.ndarray): the unaltered original image
        '''

        if self.use_skew_correction is None:
            # Calibration pass: save the raw frame, undistort, and return the corrected array
            cv2.imwrite("skew1.jpg", original_img)
            dst = cv2.undistort(original_img, self.skew_mtx, self.skew_dist, None, self.skew_newcameramtx)
            cv2.imwrite("corrected.jpg", dst)
            return dst

        elif self.use_skew_correction is True:
            # Normal pass: same correction, but return the grayscale image re-read from disk
            cv2.imwrite("skew1.jpg", original_img)
            dst = cv2.undistort(original_img, self.skew_mtx, self.skew_dist, None, self.skew_newcameramtx)
            cv2.imwrite("corrected.jpg", dst)
            corrected_img = cv2.imread("corrected.jpg", 0)
            return corrected_img

        else:
            return original_img
Example #2

    def disparityCalc(self):
        ############# CALCULATE Disparity ############################
        # 1 convert the images to grayscale
        gray_left = cv2.cvtColor(self.imgLeft, cv2.COLOR_BGR2GRAY)
        gray_right = cv2.cvtColor(self.imgRight, cv2.COLOR_BGR2GRAY)

        # 2 undistort the image pair
        undistorted_image_L = cv2.undistort(gray_left, self.intrinsic_matrixL, self.distCoeffL, None)
        undistorted_image_R = cv2.undistort(gray_right, self.intrinsic_matrixR, self.distCoeffR, None)

        # 3 --> calculate disparity images
        disparity_visual = self.getDisparity(imgLeft=undistorted_image_L, imgRight=undistorted_image_R, method="BM")
        disparity_visual = disparity_visual.astype(np.uint8)
        return disparity_visual
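Most examples on this page repeat the same three-step flow: obtain intrinsics, derive an optimal new camera matrix, then call cv2.undistort. A minimal self-contained sketch of that flow; the matrix and coefficient values below are made up for illustration and are not taken from any example here:

import cv2
import numpy as np

img = np.zeros((480, 640, 3), np.uint8)    # placeholder frame
h, w = img.shape[:2]
mtx = np.array([[500.0, 0, w / 2],         # assumed camera matrix
                [0, 500.0, h / 2],
                [0, 0, 1]])
dist = np.array([-0.2, 0.05, 0, 0, 0])     # assumed (k1, k2, p1, p2, k3)
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
undistorted = cv2.undistort(img, mtx, dist, None, newcameramtx)
x, y, rw, rh = roi
undistorted = undistorted[y:y + rh, x:x + rw]   # crop to the valid region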
Example #3
	def calibrate_imgs(self):
		'''
			Uses calibration matrices stored on file to undistort the rotated images
		'''
		# Use OpenCV function to undistort images
		self.calib_image_left = cv2.undistort(self.rotated_image_left, self.calib_matrix_left[0], self.calib_matrix_left[1], None, self.calib_matrix_left[2])
		self.calib_image_right = cv2.undistort(self.rotated_image_right, self.calib_matrix_right[0], self.calib_matrix_right[1], None, self.calib_matrix_right[2])
Example #4
    def captureNextFrame(self):
        """
        Capture frame, convert to RGB, and return opencv image
        """
        ret, frame = self.capture.read()
        if ret:
            self.currentFrame = cv2.cvtColor(frame, cv2.COLOR_BAYER_GB2BGR)
            self.bwFrame = cv2.cvtColor(self.currentFrame, cv2.COLOR_BGR2GRAY)
        if self.calibration_loaded:
            self.currentFrame = cv2.undistort(self.currentFrame, self.camera_matrix, self.dist_coefs, None, self.new_camera_matrix)
            self.bwFrame = cv2.undistort(self.bwFrame, self.camera_matrix, self.dist_coefs, None, self.new_camera_matrix)
Example #5
def undistort(image):
    """image: original RGB image to be undistorted
    Return undistorted image.
    """
    undistort_image = cv2.undistort(image, camera_matrix, distortion_coeff, None, camera_matrix)

    return undistort_image
Example #6

    def recent_events(self, events):
        frame = events.get('frame')
        if not frame:
            return
        if self.collect_new:
            img = frame.img
            status, grid_points = cv2.findCirclesGrid(img, (4,11), flags=cv2.CALIB_CB_ASYMMETRIC_GRID)
            if status:
                self.img_points.append(grid_points)
                self.obj_points.append(self.obj_grid)
                self.collect_new = False
                self.count -=1
                self.button.status_text = "{:d} to go".format(self.count)


        if self.count<=0 and not self.calculated:
            self.calculate()
            self.button.status_text = ''

        if self.window_should_close:
            self.close_window()

        if self.show_undistortion:
            adjusted_k, roi = cv2.getOptimalNewCameraMatrix(
                cameraMatrix=self.camera_intrinsics[0],
                distCoeffs=self.camera_intrinsics[1],
                imageSize=self.camera_intrinsics[2],
                alpha=0.5,
                newImgSize=self.camera_intrinsics[2],
                centerPrincipalPoint=1)
            self.undist_img = cv2.undistort(frame.img, self.camera_intrinsics[0],
                                            self.camera_intrinsics[1],
                                            newCameraMatrix=adjusted_k)
Example #7
        def run(self, camera=False):
                frame = cv2.namedWindow(FRAME_NAME)

                # Set callback
                cv2.setMouseCallback(FRAME_NAME, self.draw)

                if camera:
                        cap = cv2.VideoCapture(0)
                        for i in range(10):
                                status, image = cap.read()
                else:
                        image = cv2.imread('00000001.jpg')

                self.image = cv2.undistort(image, CMATRIX, DIST, None, NCMATRIX)

                # Get various data about the image from the user
                self.get_pitch_outline()

                self.get_zone('Zone_0', 'draw LEFT Defender')
                self.get_zone('Zone_1', 'draw LEFT Attacker')
                self.get_zone('Zone_2', 'draw RIGHT Attacker')
                self.get_zone('Zone_3', 'draw RIGHT Defender')

                self.get_goal('Zone_0')
                self.get_goal('Zone_3')

                print('Press any key to finish.')
                cv2.waitKey(0)
                cv2.destroyAllWindows()

                # Write out the data
                # self.dump('calibrations/calibrate.json', self.data)
                tools.save_croppings(pitch=self.pitch, data=self.data)
Example #8
def calibrate(filenames):
    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((6*7,3), np.float32)
    objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)
    # Arrays to store object points and image points from all the images.
    objpoints = [] # 3d point in real world space
    imgpoints = [] # 2d points in image plane.
    images = []
    for filename in filenames:
        # Find the chess board corners. If found, add object points, image points (after refining them)
        img = cv2.imread(filename)
        if img is not None:
            print("Loaded " + repr(filename))
        else:
            print("Unable to load image " + repr(filename))
            continue
        images.append(img)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, (7,6), None)
        if ret:
            objpoints.append(objp)
            corners2 = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
            imgpoints.append(corners2)
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
    print("Loaded all images and calculated calibration")
    for i, img in enumerate(images):
        img = images[i]
        h, w = img.shape[:2]
        newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h))
        dst = cv2.undistort(img, mtx, dist, None, newcameramtx)
        x, y, w, h = roi
        cv2.imwrite( 'calibrated/out_' + str(i) + '.png', dst[ y : y+h, x : x+w ])
        print "Outputted calibrated image: 'calibrated/out_" + str(i) + ".png'"
Example #9
    def undistort_image(self, img, Kundistortion=None):
        """
        Transform grayscale image such that radial distortion is removed.

        :param img: input image
        :type img: np.ndarray, shape=(n, m) or (n, m, 3)
        :param Kundistortion: camera matrix for undistorted view, None for self.K
        :type Kundistortion: array-like, shape=(3, 3)
        :return: transformed image
        :rtype: np.ndarray, shape=(n, m) or (n, m, 3)
        """
        if Kundistortion is None:
            Kundistortion = self.K
        if self.calibration_type == 'opencv':
            return cv2.undistort(img, self.K, self.opencv_dist_coeff, newCameraMatrix=Kundistortion)
        elif self.calibration_type == 'opencv_fisheye':
            return cv2.fisheye.undistortImage(img, self.K, self.opencv_dist_coeff, Knew=Kundistortion)
        else:
            xx, yy = np.meshgrid(np.arange(img.shape[1]), np.arange(img.shape[0]))
            img_coords = np.array([xx.ravel(), yy.ravel()])
            y_l = self.undistort(img_coords, Kundistortion)
            if img.ndim == 2:
                return griddata(y_l.T, img.ravel(), (xx, yy), fill_value=0, method='linear')
            else:
                channels = [griddata(y_l.T, img[:, :, i].ravel(), (xx, yy), fill_value=0, method='linear')
                            for i in range(img.shape[2])]
                return np.dstack(channels)
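A note on the branches above: cv2.undistort implements the standard plumb-bob (Brown-Conrady) model, while cv2.fisheye.undistortImage expects the fisheye model's four coefficients, so the two code paths are not interchangeable. A tiny sketch with made-up fisheye intrinsics, not taken from this example:

import cv2
import numpy as np

img = np.zeros((480, 640, 3), np.uint8)                        # placeholder frame
K = np.array([[300.0, 0, 320], [0, 300.0, 240], [0, 0, 1]])    # assumed intrinsics
D = np.array([0.05, 0.01, 0.0, 0.0])                           # assumed fisheye k1..k4
out = cv2.fisheye.undistortImage(img, K, D, Knew=K)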
Example #10

def warp(img):
    img = cv2.undistort(img, mtx, dist, None, mtx)
    preprocess_image = np.zeros_like(img[:,:,0])
    gradx = abs_sobel_thresh(img, orient='x', thresh=(12, 255))
    grady = abs_sobel_thresh(img, orient='y', thresh=(25, 255))

    c_binary = color_thresh(img, sthresh=(60, 255), vthresh=(50, 255), lthresh=(75, 255))
    preprocess_image[((gradx == 1) & (grady == 1) | (c_binary == 1))] = 255

    img_size = (img.shape[1], img.shape[0])

    # map a trapezium view of the lane to a rectangle (bird's-eye view)
    bot_width = .76     # percentage of width of bottom of trapezium
    mid_width = .08     # top of trapezium width
    height_pct = .62    # height of trapezium %age of image ht
    bottom_trim = .935  # ignoring the hood of the car
    src = np.float32([
        [img.shape[1]*(.5-mid_width/2), img.shape[0]*height_pct],
        [img.shape[1]*(.5+mid_width/2), img.shape[0]*height_pct],
        [img.shape[1]*(.5+bot_width/2), img.shape[0]*bottom_trim],
        [img.shape[1]*(.5-bot_width/2), img.shape[0]*bottom_trim],
    ])
    # map to a rectangle
    offset = img.shape[1]*.25
    dst = np.float32([
        [offset, 0],
        [img.shape[1]-offset, 0],
        [img.shape[1]-offset, img.shape[0]],
        [offset, img.shape[0]]
    ])
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    warped = cv2.warpPerspective(preprocess_image, M, img_size, flags=cv2.INTER_LINEAR)
    return warped, M, Minv
Example #11
def undistort(mtx, dist, objpoints, imgpoints, rvecs, tvecs, img):
    # read in an image of choice
    # useful for reading in and segmenting the board to make Hough lines easier
    h, w = img.shape[:2]
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))

    # undistort
    dst = cv2.undistort(img, mtx, dist, None, newcameramtx)

    # crop the image
    x, y, w, h = roi
    dst = dst[y:y + h, x:x + w]
    cv2.imwrite('calibresult.png', dst)

    # undistort again via precomputed remap tables (note: w and h were
    # reassigned from roi above, so these maps use the cropped size)
    mapx, mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w, h), cv2.CV_32FC1)
    dst = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)

    # crop the image
    x, y, w, h = roi
    dst = dst[y:y + h, x:x + w]
    cv2.imwrite('calibresult.png', dst)

    mean_error = 0
    for i in range(len(objpoints)):
        imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
        error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
        mean_error += error

    print("total error: ", mean_error / len(objpoints))
Example #12
def calibrationExample():
    camNum = 0          # The number of the camera to calibrate
    nPoints = 5         # number of images used for the calibration (space presses)
    patternSize = (9, 6)  # size of the calibration pattern
    saveImage = False

    calibrated, camera_matrix, dist_coefs, rms = SIGBTools.calibrateCamera(camNum, nPoints, patternSize, saveImage)
    K = camera_matrix
    cam1 = Camera(np.hstack((K, np.dot(K, np.array([[0], [0], [-1]])))))
    cam1.factor()
    # Factor projection matrix into intrinsic and extrinsic parameters
    print("K=", cam1.K)
    print("R=", cam1.R)
    print("t", cam1.t)
    
    if calibrated:
        capture = cv2.VideoCapture(camNum)
        running = True
        while running:
            running, img = capture.read()
            imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            ch = cv2.waitKey(1)
            if (ch == 27) or (ch == ord('q')):  # ESC or 'q'
                running = False
            img = cv2.undistort(img, camera_matrix, dist_coefs)
            found, corners = cv2.findChessboardCorners(imgGray, patternSize)
            if found:
                cv2.drawChessboardCorners(img, patternSize, corners, found)
            cv2.imshow("Calibrated", img)
Example #13
def computeCameraMatrix():
    counter = 1

    h, w = 0, 0
    for fname in images:
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        h, w = gray.shape[:2]

        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, pattern_size, None)
        # If found, add object points, image points (after refining them)
        if ret == True:
            objpoints.append(objp)
            cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)

            imgpoints.append(corners)
            # Draw and display the corners
            cv2.drawChessboardCorners(img, pattern_size, corners, ret)
            cv2.imshow("img", img)

            rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (w, h), None, None)

            newcameramtx, roi = cv2.getOptimalNewCameraMatrix(camera_matrix, dist_coefs, (w, h), 1, (w, h))

            dst = cv2.undistort(gray, camera_matrix, dist_coefs, None, newcameramtx)
            cv2.imshow("undistort image", dst)
            cv2.waitKey(100)
            counter = counter + 1
        else:
            print ("No corners found on Picture " + str(counter))

    cv2.destroyAllWindows()
Example #14
def getImage():
    # initialize the camera and grab a reference to the raw camera capture
    camera = PiCamera()
    camera.resolution = (640, 480)
    camera.framerate = 32
    rawCapture = PiRGBArray(camera, size=(640, 480))

    # print("setting is end!")

    # allow the camera to warmup
    # time.sleep(0.1)
    dst = None
    # capture a single frame from the camera
    for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
        # grab the raw NumPy array representing the image
        image = frame.array

        mtx = np.array([[100, 0, 320], [0, 100, 240], [0, 0, 1]], dtype=np.float32)
        dist = np.array([-0.009, 0.0, 0.0, 0.0], dtype=np.float32)
        h, w = image.shape[:2]
        newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
        # undistort
        dst = cv2.undistort(image, mtx, dist, None, newcameramtx)
        break

    return dst
Example #15
def videoMatch2(calibFile,videoFile,startFrame,endFrame):
	K, distort  = readCalibrationData(calibFile)
	print(K)
	print(distort)
	cap = cv2.VideoCapture(videoFile)
	skipFrames(cap,startFrame)
	print("Current Frame: "+ str(cap.get(1))+"/"+ str(cap.get(7)))
	ret, firstFrame = cap.read()
	skipFrames(cap,endFrame-startFrame)
	ret, secondFrame = cap.read()
	patches = match(firstFrame.copy(),secondFrame.copy(),K,distort)
	cubePositions = [p.x for i,p in enumerate(patches) if randint(0,3)==0] #np.array([0,0,50,1])#
	#print(patches)
	#cv2.waitKey(0)
	cap.set(cv2.CAP_PROP_POS_FRAMES, startFrame)
	for frame in range(startFrame, int(cap.get(cv2.CAP_PROP_FRAME_COUNT))):
		ret, img = cap.read()
		img = cv2.undistort(img,K,distort)
		corr = correspondences.getPatchCorrespondences(patches,img)
		#for c in corr:
		#	cv2.circle(img, tuple(c[0].astype(int)),3, (0,0,255),-1)
		#	cv2.line(img, tuple(c[0].astype(int)), tuple(c[1].pt1.astype(int)), (0,0,255),1)
		if(len(corr)>7):
			p = computervision.findTransformation(K, corr)
			for cube in cubePositions:
				projectCube(img,p,cube)
		cv2.imwrite("out/"+str(frame)+".png",img)
		cv2.imshow("test2", img)
		cv2.waitKey(1)
	cap.release()
Example #16

def UndistortImage(image, intrinsic_matrix, distCoeff):
    # 1 Undistort the Image
    undistorted_image = cv2.undistort(image, intrinsic_matrix, distCoeff, None)
    
    # 2 return the undistorted image
    #undistorted_imgList.append(undistorted_img)
    return undistorted_image
Example #17
def cameraMatch(calibFile):
	K, distort  = readCalibrationData(calibFile)
	print(K)
	print(distort)
	cap = cv2.VideoCapture(0)
	while(True):
		ret, firstFrame = cap.read()
		cv2.imshow("test2", firstFrame)
		if cv2.waitKey(1)!=-1:
			break
	print("first pic taken")
	while(True):
		ret, secondFrame = cap.read()
		cv2.imshow("test2", secondFrame)
		if cv2.waitKey(1)!=-1:
			break
	print("second pic taken")
	patches = match(firstFrame.copy(),secondFrame.copy(),K,distort)
	cubePositions = [p.x for i,p in enumerate(patches) if randint(0,1)==0] #np.array([0,0,50,1])#
	#print(patches)
	while(True):
		ret, img = cap.read()
		img = cv2.undistort(img,K,distort)
		corr = correspondences.getPatchCorrespondences(patches,img)
		#for c in corr:
		#	cv2.circle(img, tuple(c[0].astype(int)),3, (0,0,255),-1)
		#	cv2.line(img, tuple(c[0].astype(int)), tuple(c[1].pt1.astype(int)), (0,0,255),1)
		if(len(corr)>7):
			p = computervision.findTransformation(K, corr)
			for cube in cubePositions:
				projectCube(img,p,cube)
		cv2.imshow("test2", img)
		if cv2.waitKey(1)!=-1:
			break
	cap.release()
Example #18
def _cleanImage(image):
	h,  w = image.shape[:2]
	newcameramtx, roi=cv2.getOptimalNewCameraMatrix(camera_matrix,dist_coefs,(w,h),1,(w,h))
	dst = cv2.undistort(image, camera_matrix, dist_coefs, None, newcameramtx)
	x,y,w,h = roi
	dst = dst[y:y+h, x:x+w]
	return dst
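The alpha (free scaling) argument of getOptimalNewCameraMatrix, set to 1 above, controls the trade-off between field of view and black borders: alpha=1 keeps every source pixel and reports the valid region in roi (hence the crop), while alpha=0 zooms in so only valid pixels remain. A self-contained sketch with made-up intrinsics:

import cv2
import numpy as np

h, w = 480, 640
mtx = np.array([[500.0, 0, w / 2], [0, 500.0, h / 2], [0, 0, 1]])   # assumed intrinsics
dist = np.array([-0.2, 0.05, 0, 0, 0])                              # assumed coefficients
img = np.zeros((h, w, 3), np.uint8)                                 # placeholder image

tight_mtx, _ = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 0, (w, h))    # no border
loose_mtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))  # full FOV
tight = cv2.undistort(img, mtx, dist, None, tight_mtx)
loose = cv2.undistort(img, mtx, dist, None, loose_mtx)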
Example #19
def undistort_image(image, shot):
    """Remove radial distortion from a perspective image."""
    camera = shot.camera
    height, width = image.shape[:2]
    K = opencv_calibration_matrix(width, height, camera.focal)
    distortion = np.array([camera.k1, camera.k2, 0, 0])
    return cv2.undistort(image, K, distortion)
Example #20
def undistort_img(img, points3d, points2d):
    img_size = (img.shape[1], img.shape[0])

    # Do camera calibration given object points and image points
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(points3d, points2d, img_size, None, None)
    dst = cv2.undistort(img, mtx, dist, None, mtx)
    return dst
Example #21
def main(file_list, outdir):
    # copy parameters to arrays
    K = np.array([[1743.23312, 0, 2071.06177], [0, 1741.57626, 1476.48298], [0, 0, 1]])
    d = np.array([-0.307412748, 0.300929683, 0, 0, 0])  # just use first two terms (no translation)

    logging.debug("Starting loop")
    for image in file_list:
        logging.debug("var %s", image)
        imgname = image.split("/")[-1]
        new_image_path = os.path.join(outdir, imgname)
        if not os.path.exists(new_image_path):
            logging.debug("Undistorting %s . . . ", imgname)
            # read one of your images
            img = cv2.imread(image)
            h, w = img.shape[:2]

            # un-distort
            newcamera, roi = cv2.getOptimalNewCameraMatrix(K, d, (w, h), 0)
            newimg = cv2.undistort(img, K, d, None, newcamera)

            # cv2.imwrite("original.jpg", img)
            cv2.imwrite(new_image_path, newimg)

            # Write metadata
            old_meta = pyexiv2.ImageMetadata(image)
            new_meta = pyexiv2.ImageMetadata(new_image_path)
            old_meta.read()
            new_meta.read()
            old_meta.copy(new_meta)
            new_meta.write()
        else:
            logging.debug("Image already processed")
Example #22
def undistort_image(imagepath, calib_file, visulization_flag):
    """ undistort the image and visualize the result

    :param imagepath: image path
    :param calib_file: includes calibration matrix and distortion coefficients
    :param visulization_flag: flag to plot the image
    :return: the undistorted image in RGB order
    """
    mtx, dist = load_calibration(calib_file)

    img = cv2.imread(imagepath)

    # undistort the image
    img_undist = cv2.undistort(img, mtx, dist, None, mtx)
    img_undistRGB = cv2.cvtColor(img_undist, cv2.COLOR_BGR2RGB)

    if visulization_flag:
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        f, (ax1, ax2) = plt.subplots(1, 2)
        ax1.imshow(imgRGB)
        ax1.set_title('Original Image', fontsize=30)
        ax1.axis('off')
        ax2.imshow(img_undistRGB)
        ax2.set_title('Undistorted Image', fontsize=30)
        ax2.axis('off')
        plt.show()

    return img_undistRGB
Example #23

	def undistortImage(self, img):
		if self.calibrationData is False:
			return img

		h,  w = img.shape[:2]
		img = cv2.undistort(img, np.asarray(self.calibrationData['camera_matrix']), np.asarray(self.calibrationData['dist_coeff']), None)
		return img
Example #24
    def doPreprocess(self):
        '''
        All processing before analysing separate chambers
        '''
        # Discarding color information
        if self.frame is None:
            return None
        frame = cv2.cvtColor(self.frame, cv2.COLOR_RGB2GRAY)
        
        # Inverting frame if needed        
        if self.invertImage :
            frame = cv2.bitwise_not(frame)
        
        if self.removeBarrel :
            distCoeff = np.zeros((4,1),np.float64)
            distCoeff[0,0] = self.removeBarrelCoef
            
            # assume unit matrix for camera
            cam = np.eye(3,dtype=np.float32)
            
            height, width = frame.shape[:2]
            
            cam[0,2] = width/2.0  # define center x
            cam[1,2] = height/2.0 # define center y
            cam[0,0] = float(self.removeBarrelFocal)        # define focal length x
            cam[1,1] = cam[0,0]        # define focal length y

            # here the undistortion will be computed
            frame = cv2.undistort(frame,cam,distCoeff)
                
        self.signalNextFrame.emit(frame)
Example #25
def read_left():
    global l, left, newcamera

    while True:
        _, x = left.read()
        # x = cv2.GaussianBlur(x, (9, 9), 0)
        l = cv2.undistort(x, k, d, None, newcamera)
Example #26
def calibration(dis_frame):
    cameraMatrix=np.array([[133.77965, 0, 179.13740],
                           [0, 133.75846, 111.34959],
                           [0, 0, 1]])
    distCoeffs=np.array([-0.31482, 0.10522, 0.000387, 0.00001367, -0.01685])
    output_frame = cv2.undistort(dis_frame, cameraMatrix,distCoeffs,None)
    return output_frame
Example #27
def undistort(img, corners, profile_name):
    profile = load_calibration_profile(profile_name)
    img, corners = _uncrop(img, corners)
    if len(img.shape) == 3:
        # XXX Hack. Fix _uncrop!
        img = img[:, :, 1]

    if img.shape[:2] not in profile:
        raise ValueError('No calibration settings for input image size.')

    settings = profile[img.shape[:2]]
    height, width = img.shape[:2]

    # Undistort corners. OpenCV expects (x, y) and a 3D array.
    corners = np.array([np.fliplr(corners)])
    undist_corners = cv2.undistortPoints(corners, settings.camera_matrix,
                                         settings.distort_coeffs,
                                         P=settings.camera_matrix)
    undist_corners = np.fliplr(undist_corners[0])

    undist_img = cv2.undistort(img, settings.camera_matrix,
                               settings.distort_coeffs)

    log.debug('Undistorted image, shape={}, calibration_profile={}'.format(
              img.shape, profile_name))

    return undist_img, undist_corners
Example #28
def undistort_brown_image(image, camera, new_camera):
    """Remove radial distortion from a brown image."""
    height, width = image.shape[:2]
    K = camera.get_K_in_pixel_coordinates(width, height)
    distortion = np.array([camera.k1, camera.k2, camera.p1, camera.p2, camera.k3])
    new_K = new_camera.get_K_in_pixel_coordinates(width, height)
    # the 4th positional argument of cv2.undistort is dst; the new camera
    # matrix must go in the 5th slot (or be passed as newCameraMatrix=)
    return cv2.undistort(image, K, distortion, None, new_K)
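The fix above reflects cv2.undistort's Python signature, undistort(src, cameraMatrix, distCoeffs[, dst[, newCameraMatrix]]): the fourth positional slot is the output buffer, so a new camera matrix passed there is treated as dst and never used as intrinsics. A short sketch of the two equivalent correct spellings, using made-up values:

import cv2
import numpy as np

img = np.zeros((480, 640, 3), np.uint8)                        # placeholder image
K = np.array([[500.0, 0, 320], [0, 500.0, 240], [0, 0, 1]])    # assumed intrinsics
d = np.array([-0.2, 0.05, 0, 0, 0])                            # assumed coefficients
new_K = K.copy()

ok1 = cv2.undistort(img, K, d, None, new_K)                    # fifth positional slot
ok2 = cv2.undistort(img, K, d, newCameraMatrix=new_K)          # keyword form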
Example #29

def unwarp(img, nx, ny, mtx, dist):
    img_size = (img.shape[1], img.shape[0])

    # first candidate mapping (superseded by the hard-coded points below)
    corners = np.float32([[190, 720], [589, 457], [698, 457], [1145, 720]])
    new_top_left = np.array([corners[0, 0], 0])
    new_top_right = np.array([corners[3, 0], 0])
    offset = [150, 0]

    src = np.float32([corners[0], corners[1], corners[2], corners[3]])
    dst = np.float32([corners[0] + offset, new_top_left + offset, new_top_right - offset, corners[3] - offset])

    # these assignments overwrite the ones above and are the values actually used
    src = np.float32([(575, 464), (707, 464), (258, 682), (1049, 682)])
    dst = np.float32([(450, 0), (img_size[0] - 450, 0), (450, img_size[1]), (img_size[0] - 450, img_size[1])])

    undist = cv2.undistort(img, mtx, dist, None, mtx)

    # Given src and dst points, calculate the perspective transform matrix
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    # Warp the image using OpenCV warpPerspective()
    warped = cv2.warpPerspective(undist, M, img_size,flags=cv2.INTER_LINEAR)

    # Return the resulting image and matrix
    return warped, M, Minv
Example #30

def cal_undistort(img, objpoints, imgpoints):
    img_size = (img.shape[1], img.shape[0])
    # Use cv2.calibrateCamera() and cv2.undistort()
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None)
    dst = cv2.undistort(img, mtx, dist, None, mtx)
    
    return dst
Example #31
    "C:/Users/wicip/Documents/maryam_final/Codes/Calibration/dist_pickle.p",
    "rb")
array = pickle.load(pickle_in)
mtx = array['mtx']
dist = array['dist']
pickle_in.close()

# load the raw images that will be calibrated
mypath = "C:/Users/wicip/Documents/maryam_final/Images/Raw_Images/"
dirs = os.listdir(mypath)
files = []
for i in range(len(dirs)):
    if "Car" in dirs[i]:
        files.append(dirs[i])

# the loop goes through every image to undistort it, and then saves the result
save_path = "C:/Users/wicip/Documents/maryam_final/Images/Camera_Calibration/cal_cars"
for i in range(len(files)):
    new_path = os.path.join(mypath, files[i])
    for filename in os.listdir(new_path):
        location = new_path + '/' + filename
        # read in image
        img = cv2.imread(location)
        if img is not None:
            # undistort image
            undist_img = cv2.undistort(img, mtx, dist, None, mtx)
            # save image
            img_name = save_path + '/cal_' + filename
            cv2.imwrite(img_name, undist_img)

# END OF CODE
Example #32
'''

# Write the number of inside corners of the Chessboard used to Calibrate.
nx = 9  #Inside corners in x
ny = 6  #Inside corners in y
# Read in and make a list of calibration images
cal_imgs = glob.glob('camera_cal/calibration*.jpg')

ret, mtx, dist = cal_cam(cal_imgs, nx, ny)
'''
#The following parameters were obtained by calibrating the camera with the cal_images
ret = 0.9230912642921472
mtx = np.array([[1.15660712e+03, 0.00000000e+00, 6.68960302e+02],
      [0.00000000e+00, 1.15164235e+03, 3.88057002e+02],
      [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
dist = np.array([[-0.23185386, -0.11832054, -0.00116561,  0.00023902,  0.15356159]])

#print a cal_image (original vs undistorted) to test the dist & mtx
#org = mpimg.imread('camera_cal/calibration1.jpg')
org = mpimg.imread('test_images/test3.jpg')
dst = cv2.undistort(org, mtx, dist, None, mtx)

f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(org, cmap='gray')
ax1.set_title('Original Image', fontsize=50)
ax2.imshow(dst, cmap='gray')
ax2.set_title('Final Image', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
#############################################################################
Example #33
cv.ocl.setUseOpenCL(True)
print("uses opencl: " + str(cv.ocl.useOpenCL()))

font = cv.FONT_HERSHEY_SIMPLEX

cap = cv.VideoCapture(path_vids + '/' + sys.argv[1])
pt = ThreadPool(6)
utils.initialize_database()

while (cap.isOpened()):
    #    global wait
    ret, frame = cap.read()
    im = frame.copy()
    # note: C_scale and roi are computed but unused; the undistort call below reuses C_W
    C_scale, roi = cv.getOptimalNewCameraMatrix(C_W, D_W, im.shape[:-1], 1,
                                                im.shape[:-1])
    im_rect = cv.undistort(im, C_W, D_W, None, C_W)
    cv.imshow('rectified stream', im_rect)
    k = cv.waitKey(wait) & 0xFF  #wait 40ms for 25 FPS
    if (k == ord('s')):  #s for save, q for quit
        time_label = strftime("%d %b %H-%M-%S", gmtime())
        print(time_label)
        cv.imwrite(
            '/home/youssef/Documenten/Projectcomputervisie/testImages/' +
            sys.argv[1] + '/rectified_calibM_' + str(time_label) + '.png',
            im_rect)
        #cv.imwrite('D:/School/2018-2019/Project CV - Paintings/Testimgs/rectified_calibM_'+str(time_label)+'.png', im_rect)

####################################################################################################################################
####################################################################################################################################
####################################################################################################################################
    if (k == ord('p')):  #p to request a prediction
Example #34

def undistortion_function(img, mtx=mtx, dist=dist):
    distorsion_correction = cv2.undistort(img, mtx, dist, None, mtx)
    return distorsion_correction
Example #35
def undistorted(img, mtx, dist):
    dst = cv2.undistort(img, mtx, dist, None, mtx)
    return dst
Example #36

ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
                                                   gray.shape[::-1], None,
                                                   None)

if args['savepath']:
    if not os.path.isdir('./undistorted_images'):
        os.mkdir('./undistorted_images')
    for fname in images:
        img = cv2.imread(fname)
        img_name = fname.split(os.sep)[-1].split('.')[0]
        h, w = img.shape[:2]
        newcameramtx, roi = cv2.getOptimalNewCameraMatrix(
            mtx, dist, (w, h), 1, (w, h))

        # undistort
        dst = cv2.undistort(img, mtx, dist, None, newcameramtx)

        # crop the image
        x, y, w, h = roi
        dst = dst[y:y + h, x:x + w]
        cv2.imwrite('./undistorted_images/undistorted_{}.jpg'.format(img_name),
                    dst)
    cv2.destroyAllWindows()

#calculate re-projection error
tot_error = 0
for i in range(len(objpoints)):
    imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx,
                                      dist)
    error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
    tot_error += error
Example #37

    def camera_calibrate(self, nx=6, ny=9, l=33, path='Pictures'):
        '''
        Chessboard should be used.

        nx - number of inside corners in x;
        ny - number of inside corners in y;
        l - width of the square (in mm);

        reference: https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html#calibration
        '''
        # prepare object points

        #CALIBRATING THE CAMERA
        # termination criteria
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30,
                    0.001)

        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(nx,ny,0)
        objp = np.zeros((ny * nx, 3), np.float32)
        objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)

        # Arrays to store object points and image points from all the images.
        objpoints = []  # 3d point in real world space
        imgpoints = []  # 2d points in image plane.

        #Finding all the images .jpg
        os.chdir(self.script_path + '/' + path)
        images = glob.glob('*.jpg')
        print(images)
        print('Unrecognized images (if any):')
        #Loop for finding corners, calculating image points and object points
        examples = []
        for fname in images:
            img = cv2.imread(fname)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            # Find the chess board corners
            ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)

            # If found, add object points, image points (after refining them)
            if ret:
                objpoints.append(objp)
                corners2 = cv2.cornerSubPix(gray, corners, (9, 9), (-1, -1),
                                            criteria)
                imgpoints.append(corners2)
                examples.append(gray)
                # Draw and display the corners
                img = cv2.drawChessboardCorners(img, (nx, ny), corners2, ret)

                cv2.imshow('ImageWindow', img)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

            else:
                # image where no corners were found; print the name
                print(fname)  # so it can be deleted manually afterwards
        #Calibration
        ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
            objpoints, imgpoints, gray.shape[::-1], None, None)
        #dist – Output vector of distortion coefficients
        # (k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements.
        #rvecs – Output vector of rotation vectors
        #ret – RMS re-projection error
        #mtx – Output 3x3 floating-point camera matrix
        #tvecs – Output vector of translation vectors estimated for each pattern view.

        #Store the parameters in the folder
        #Starting undistortion

        # Read in an image
        gray = examples[int(np.random.randint(0, len(examples), 1))]  # you may put any photo here
        # img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        h, w = img.shape[:2]

        #Returns the new (optimal) camera matrix based on the free scaling parameter.
        newcameramtx, roi = cv2.getOptimalNewCameraMatrix(
            mtx, dist, (w, h), 0, (w, h))
        #undistort
        img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
        dst = cv2.undistort(img, mtx, dist, None, newcameramtx)
        cv2.imwrite('object_size_undistorted.png', dst)
        np.savez('Parameters',
                 ret=ret,
                 matrix=mtx,
                 distance=dist,
                 rotation=rvecs,
                 translation=tvecs,
                 newmatrix=newcameramtx,
                 roi=roi)
        # undistort
        dst = cv2.undistort(img, mtx, dist, None, newcameramtx)
        #cv2.imwrite('object_measure.png', dst)
        # crop the image
        x, y, w, h = roi
        dst = dst[y:y + h, x:x + w]

        #Plotting
        f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 9))
        f.tight_layout()
        ax1.imshow(img)
        ax1.set_title('Original Image', fontsize=20)
        ax2.imshow(dst)
        ax2.set_title('Undistorted Image', fontsize=20)
        plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
        plt.suptitle('CALIBRATION OF THE CAMERA')
        plt.show()
        #calculating the reprojection error
        tot_error = 0
        for i in range(len(objpoints)):
            imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i],
                                              mtx, dist)
            error = cv2.norm(imgpoints[i], imgpoints2,
                             cv2.NORM_L2) / len(imgpoints2)
            tot_error += error
        print("reprojection error = %.2f \n" % (tot_error / len(objpoints)))
Example #38
def main():
    import sys
    import getopt
    from glob import glob

    args, img_mask = getopt.getopt(sys.argv[1:], '',
                                   ['debug=', 'square_size=', 'threads='])
    args = dict(args)
    args.setdefault('--debug', './output/')
    args.setdefault('--square_size', 1.0)
    args.setdefault('--threads', 4)
    if not img_mask:
        img_mask = '../data/left??.jpg'  # default
    else:
        img_mask = img_mask[0]

    img_names = glob(img_mask)
    debug_dir = args.get('--debug')
    if debug_dir and not os.path.isdir(debug_dir):
        os.mkdir(debug_dir)
    square_size = float(args.get('--square_size'))

    pattern_size = (9, 6)
    pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
    pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
    pattern_points *= square_size

    obj_points = []
    img_points = []
    h, w = cv2.imread(img_names[0], cv2.IMREAD_GRAYSCALE
                      ).shape[:2]  # TODO: use imquery call to retrieve results

    def processImage(fn):
        print('processing %s... ' % fn)
        img = cv2.imread(fn, 0)
        if img is None:
            print("Failed to load", fn)
            return None

        assert w == img.shape[1] and h == img.shape[0], (
            "size: %d x %d ... " % (img.shape[1], img.shape[0]))
        found, corners = cv2.findChessboardCorners(img, pattern_size)
        if found:
            term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
            cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term)

        if debug_dir:
            vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
            cv2.drawChessboardCorners(vis, pattern_size, corners, found)
            _path, name, _ext = splitfn(fn)
            outfile = os.path.join(debug_dir, name + '_chess.png')
            cv2.imwrite(outfile, vis)

        if not found:
            print('chessboard not found')
            return None

        print('           %s... OK' % fn)
        return (corners.reshape(-1, 2), pattern_points)

    threads_num = int(args.get('--threads'))
    if threads_num <= 1:
        chessboards = [processImage(fn) for fn in img_names]
    else:
        print("Run with %d threads..." % threads_num)
        from multiprocessing.dummy import Pool as ThreadPool
        pool = ThreadPool(threads_num)
        chessboards = pool.map(processImage, img_names)

    chessboards = [x for x in chessboards if x is not None]
    for (corners, pattern_points) in chessboards:
        img_points.append(corners)
        obj_points.append(pattern_points)

    # calculate camera distortion
    rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(
        obj_points, img_points, (w, h), None, None)

    print("\nRMS:", rms)
    print("camera matrix:\n", camera_matrix)
    print("distortion coefficients: ", dist_coefs.ravel())

    # undistort the image with the calibration
    print('')
    for fn in img_names if debug_dir else []:
        path, name, ext = splitfn(fn)
        img_found = os.path.join(debug_dir, name + '_chess.png')
        outfile = os.path.join(debug_dir, name + '_undistorted.png')

        img = cv2.imread(img_found)
        if img is None:
            continue

        h, w = img.shape[:2]
        newcameramtx, roi = cv2.getOptimalNewCameraMatrix(
            camera_matrix, dist_coefs, (w, h), 1, (w, h))

        dst = cv2.undistort(img, camera_matrix, dist_coefs, None, newcameramtx)

        # crop and save the image
        x, y, w, h = roi
        dst = dst[y:y + h, x:x + w]

        print('Undistorted image written to: %s' % outfile)
        cv2.imwrite(outfile, dst)

    print('Done')
Example #39

def process_single_image():
    """
    Function for processing one picture of choice, analyzing it and adopt parameters
    """

    # Uncomment for getting all the plots immediately
    #plt.ion()

    # Play around with camera calibration and print out result for storage below
    #mtx, dist = get_cameracalib("camera_cal")
    #print('mtx = {}'.format(mtx))
    #print('dist = {}'.format(dist))

    ### Example calibrations derived from one picture (calibration3) and
    ### of all pictures in folder 'test_images'
    # calibration3
    #mtx = [[  1.16949654e+03   0.00000000e+00   6.68285391e+02]
    #       [  0.00000000e+00   1.14044391e+03   3.59592347e+02]
    #       [  0.00000000e+00   0.00000000e+00   1.00000000e+00]]
    #dist = [[-0.25782023 -0.0988196   0.01240745  0.00407057  0.52894628]]

    # average
    #mtx = [[  1.15396093e+03   0.00000000e+00   6.69705359e+02]
    #       [  0.00000000e+00   1.14802495e+03   3.85656232e+02]
    #       [  0.00000000e+00   0.00000000e+00   1.00000000e+00]]
    #dist = [[ -2.41017968e-01  -5.30720497e-02  -1.15810318e-03  -1.28318543e-04 2.67124302e-02]]

    # The chosen values for camera matrix and camera distortion
    mtx = np.array([[1.15396093e+03, 0.00000000e+00, 6.69705359e+02],
                    [0.00000000e+00, 1.14802495e+03, 3.85656232e+02],
                    [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
    dist = np.array([[
        -2.41017968e-01, -5.30720497e-02, -1.15810318e-03, -1.28318543e-04,
        2.67124302e-02
    ]])

    # Definition of images, use one at a time
    image = mpimg.imread('./test_images/straight_lines1.jpg')
    #image = mpimg.imread('./test_images/straight_lines2.jpg')
    #image = mpimg.imread('./test_images/test1.jpg')
    #image = mpimg.imread('./test_images/test2.jpg')
    #image = mpimg.imread('./test_images/test3.jpg')
    #image = mpimg.imread('./test_images/test4.jpg')
    #image = mpimg.imread('./test_images/test5.jpg')
    #image = mpimg.imread('./test_images/test6.jpg')
    ### Extra images taken out of the video describing different situation at crossing the first bridge
    #image = mpimg.imread('./Snapshots/vlcsnap-01.jpg')
    #image = mpimg.imread('./Snapshots/vlcsnap-02.jpg')
    #image = mpimg.imread('./Snapshots/vlcsnap-03.jpg')
    #image = mpimg.imread('./Snapshots/vlcsnap-04.jpg')
    #image = mpimg.imread('./Snapshots/vlcsnap-05.jpg')
    #image = mpimg.imread('./Snapshots/vlcsnap-06.jpg')
    #image = mpimg.imread('./Snapshots/vlcsnap-07.jpg')
    #image = mpimg.imread('./Snapshots/vlcsnap-08.jpg')
    #image = mpimg.imread('./Snapshots/vlcsnap-09.jpg')
    #image = mpimg.imread('./Snapshots/vlcsnap-10.jpg')

    # Undistorting image
    image = cv2.undistort(image, mtx, dist, None, mtx)
    plotimg(image)

    # Choose this one if you want to play around with edge filtereing, color transformation
    if 0:
        playground_grad_color(image)

    # Use the chosen edge detection and color transformation for filtering the image
    filtered_image = my_grad_color_filter(image)
    plotimg(filtered_image)

    # Generate the warped image
    birdimage, perspective_M, Minv = corners_unwarp(filtered_image)
    plot2img(image, birdimage)

    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = birdimage.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])

    # Transforming one dimensional, filtered and warped image into 3 channel image
    out_img = np.uint8(np.dstack((birdimage, birdimage, birdimage)) * 255)

    ### Simulating first time lane detection based upon window methodology
    left_lane_inds, right_lane_inds, win_img = lineidx_detection_per_window(
        birdimage, nonzerox, nonzeroy)

    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]

    # Fit a second order polynomial to each
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)

    # Combine image with windows and 3-dim bird image, print it out
    plt_img = cv2.addWeighted(out_img, 1, win_img, 1.0, 0)
    plot_lanelines(plt_img, left_fit, right_fit, left_lane_inds,
                   right_lane_inds, nonzerox, nonzeroy)

    ### Simulating second time lane detection based upon search area method
    nonzero = birdimage.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])

    # lane detection based upon search area method (requiring fitted polynoms)
    left_lane_inds, right_lane_inds, win_img = lineidx_detection_per_fit(
        birdimage, nonzerox, nonzeroy, left_fit, right_fit)

    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit a second order polynomial to each
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)

    # Combine image with search area and 3-dim bird image, print it out
    plt_img = cv2.addWeighted(out_img, 1, win_img, 0.3, 0)
    plot_lanelines(plt_img, left_fit, right_fit, left_lane_inds,
                   right_lane_inds, nonzerox, nonzeroy)

    # Calculate curvature and offset from center of road for sanity check and print out into final picture
    left_curverad, right_curverad = calc_curvature(leftx, rightx, lefty,
                                                   righty)
    xoffset = calc_xoffset(left_fit, right_fit, image.shape)
    sanity = sanity_check(left_fit, right_fit, left_curverad, right_curverad,
                          image.shape)

    # Unwarp bird view image, integrate the polynom lines as marker into the image
    image = gen_img_w_marks(image, birdimage, left_fit, right_fit, Minv)

    # Add curvature, offset, and sanity information into final image
    text = "left curve:   {:7.2f}m".format(left_curverad)
    cv2.putText(image,
                text, (100, 40),
                cv2.FONT_HERSHEY_TRIPLEX,
                fontScale=1.2,
                color=(255, 255, 255),
                thickness=1,
                lineType=cv2.LINE_AA)
    text = "right curve: {:7.2f}m".format(right_curverad)
    cv2.putText(image,
                text, (100, 80),
                cv2.FONT_HERSHEY_TRIPLEX,
                fontScale=1.2,
                color=(255, 255, 255),
                thickness=1,
                lineType=cv2.LINE_AA)
    text = "Center offset:  {:5.2f}m".format(xoffset)
    cv2.putText(image,
                text, (100, 120),
                cv2.FONT_HERSHEY_TRIPLEX,
                fontScale=1.2,
                color=(255, 255, 255),
                thickness=1,
                lineType=cv2.LINE_AA)
    text = "Sanity: {}".format(sanity)
    cv2.putText(image,
                text, (100, 160),
                cv2.FONT_HERSHEY_TRIPLEX,
                fontScale=1.2,
                color=(255, 255, 255),
                thickness=1,
                lineType=cv2.LINE_AA)

    # Plot the final image
    plotimg(image)
Example #40

def process_images(image):
    """
    Function for processing consecutive images of a movie
    
    Input: RGB Image 'image'
    Output: Image with masked lane area and curvature, offset information ('marked_image')
    """

    # Information if the first image has been analyzed => use search area method
    global first_pic_done
    # Storage of parameter for polynom of lines
    global left_fit_glob
    global right_fit_glob

    # Chosen camera matrix and distortion values
    mtx = np.array([[1.15396093e+03, 0.00000000e+00, 6.69705359e+02],
                    [0.00000000e+00, 1.14802495e+03, 3.85656232e+02],
                    [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
    dist = np.array([[
        -2.41017968e-01, -5.30720497e-02, -1.15810318e-03, -1.28318543e-04,
        2.67124302e-02
    ]])

    # Undistorting image
    image = cv2.undistort(image, mtx, dist, None, mtx)

    # Color transformation and filtering according to best method evaluated
    filtered_image = my_grad_color_filter(image)

    # Warping image
    birdimage, perspective_M, Minv = corners_unwarp(filtered_image)

    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = birdimage.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])

    # Transforming one-dimensional birds view image into 3-dim image
    out_img = np.uint8(np.dstack((birdimage, birdimage, birdimage)) * 255)

    # Choosing either window method (first picture) or search area (all other pictures) for lane line detection
    if first_pic_done:
        left_fit = left_fit_glob
        right_fit = right_fit_glob

        left_lane_inds, right_lane_inds, win_img = lineidx_detection_per_fit(
            birdimage, nonzerox, nonzeroy, left_fit, right_fit)
    else:
        left_lane_inds, right_lane_inds, win_img = lineidx_detection_per_window(
            birdimage, nonzerox, nonzeroy)
        first_pic_done = 1

    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]

    # Fit a second order polynomial to each
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)

    # Calculate curvature and offset from center of road for sanity check and print out into final picture
    left_curverad, right_curverad = calc_curvature(leftx, rightx, lefty,
                                                   righty)
    xoffset = calc_xoffset(left_fit, right_fit, image.shape)
    sanity = sanity_check(left_fit, right_fit, left_curverad, right_curverad,
                          image.shape)

    ### Placeholder for sanity check
    #if not(first_pic_done) & sanity:
    left_fit_glob = left_fit
    right_fit_glob = right_fit

    # Unwarp bird view image, integrate the polynom lines as marker into the image
    marked_image = gen_img_w_marks(image, birdimage, left_fit_glob,
                                   right_fit_glob, Minv)

    # Add curvature and offset information into final image
    text = "left curve:   {:7.2f}m".format(left_curverad)
    cv2.putText(marked_image,
                text, (100, 40),
                cv2.FONT_HERSHEY_TRIPLEX,
                fontScale=1.2,
                color=(255, 255, 255),
                thickness=1,
                lineType=cv2.LINE_AA)
    text = "right curve: {:7.2f}m".format(right_curverad)
    cv2.putText(marked_image,
                text, (100, 80),
                cv2.FONT_HERSHEY_TRIPLEX,
                fontScale=1.2,
                color=(255, 255, 255),
                thickness=1,
                lineType=cv2.LINE_AA)
    text = "Center offset:  {:5.2f}m".format(xoffset)
    cv2.putText(marked_image,
                text, (100, 120),
                cv2.FONT_HERSHEY_TRIPLEX,
                fontScale=1.2,
                color=(255, 255, 255),
                thickness=1,
                lineType=cv2.LINE_AA)
    #text = "Sanity: {}".format(sanity)
    #cv2.putText(marked_image, text, (100,160), cv2.FONT_HERSHEY_TRIPLEX, fontScale=1.2, color=(255,255,255), thickness=1, lineType=cv2.LINE_AA)

    return marked_image
Example #41
def process_image(img):
    # undistort image
    img_undistorted = cv2.undistort(img, mtx, dist, None, mtx)

    # threshold image
    img_threshold = threshold_img(img_undistorted)

    # Apply perspective transform to the image
    img_warped = cv2.warpPerspective(
        img_threshold, M, img_size,
        flags=cv2.INTER_NEAREST)  # keep same size as input image

    # Find polynomial coefficients for left and right lanes
    left_fit_poly, right_fit_poly, left_fit_poly_pixel, right_fit_poly_pixel, img_out = fit_polynomial(
        img_warped, Line_obj)

    # Calculate the radius of curvature in pixels for both lane lines
    left_curverad, right_curverad = measure_curvature(left_fit_poly,
                                                      right_fit_poly)
    curvature = (left_curverad + right_curverad) * 0.5

    ### Create image for visualization
    img_warp = np.zeros_like(img_undistorted).astype(np.uint8)

    # Recast the x and y points into usable format for cv2.fillPoly()
    ploty = np.linspace(0, img_warp.shape[0] - 1, img_warp.shape[0])
    try:
        left_fitx = left_fit_poly_pixel[0] * ploty**2 + left_fit_poly_pixel[
            1] * ploty + left_fit_poly_pixel[2]
        right_fitx = right_fit_poly_pixel[0] * ploty**2 + right_fit_poly_pixel[
            1] * ploty + right_fit_poly_pixel[2]
    except TypeError:
        # Avoids an error if left_fit and right_fit are None or malformed
        print('The function failed to fit a line!')
        left_fitx = 1 * ploty**2 + 1 * ploty
        right_fitx = 1 * ploty**2 + 1 * ploty

    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array(
        [np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))

    # Draw the lane onto the warped blank image
    cv2.fillPoly(img_warp, np.int_([pts]), (0, 255, 0))

    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    img_unwarped = cv2.warpPerspective(img_warp, Minv,
                                       (img_warp.shape[1], img_warp.shape[0]))

    # Combine the result with the original image
    result = cv2.addWeighted(img, 1, img_unwarped, 0.3, 0)

    # Calculate vehicle offset
    xm_per_pix = 3.7 / 580
    offset = (
        (left_fitx[-1] + right_fitx[-1]) / 2 - img.shape[1] / 2) * xm_per_pix

    # Print out curvature and offset on image
    font = cv2.FONT_HERSHEY_PLAIN
    cv2.putText(result, "Radius of Curvature = %d (m)" % curvature, (20, 50),
                font, 3, (255, 255, 255), 3)
    cv2.putText(result, "Vehicle is %.2f m left of center" % offset, (20, 100),
                font, 3, (255, 255, 255), 3)

    return result
Example #42

def get_cameracalib(dirpath):
    """
    Function for calibrating the camera based upon pictures under 'dirpath'
    Optionally, the results can be printed out

    Input: Path 'dirpath' to pictures for calibration
    Output: Camera matrix 'mtx' and camera distortion 'dist'
    """
    images = []
    imgpoints = []
    objpoints = []
    filenames = []
    # Definition of object points
    objp = np.zeros((9 * 6, 3), np.float32)
    objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
    # Get imagepoints of all suited pictures
    files = listdir(dirpath)
    for f in files:
        if isfile(join(dirpath, f)):
            file = join(dirpath, f)
            print('{}'.format(file), end='')
            img = mpimg.imread(file)
            gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
            # If chessboard corners could be generated in picture, store imagepoints,
            # objectpoints and filename (for optional printout)
            if ret == True:
                img = cv2.drawChessboardCorners(img, (9, 6), corners, ret)
                images.append(img)
                print(': integrated', end='')
                imgpoints.append(corners)
                objpoints.append(objp)
                filenames.append(file)
            print('.')

    # For calibrating the camera on one specific picture (1st part)
    if 0:
        img = mpimg.imread("camera_cal\calibration3.jpg")
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
        objpoints, imgpoints = [], []
        objpoints.append(objp)
        imgpoints.append(corners)

    # Calculating camera matrix 'mtx', camera distortion 'dist', etc. (for both options(single or many pictures))
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
                                                       gray.shape[::-1], None,
                                                       None)

    # For calibrating the camera on one specific picture, print out (2nd part)
    if 0:
        img = cv2.drawChessboardCorners(img, (9, 6), corners, ret)
        undistimg = cv2.undistort(img, mtx, dist, None, mtx)
        mpimg.imsave(fname="output_images/calibration3_average.points.jpg",
                     arr=undistimg)

    # Optional output of all pictures and their undistorted one
    if 1:
        f, axes = plt.subplots(nrows=int(len(images) / 2) + len(images) % 2,
                               ncols=4,
                               figsize=(15, 10),
                               sharey=True,
                               num=50)
        for (i, file) in enumerate(images):
            axes[int(i / 2), i % 2 * 2 + 0].imshow(images[i])
            axes[int(i / 2), i % 2 * 2 + 0].set_title('Dist. Image\n{}'.format(
                filenames[i]),
                                                      fontsize=10)
            axes[int(i / 2), i % 2 * 2 + 0].set_axis_off()
            axes[int(i / 2), i % 2 * 2 + 1].imshow(
                cv2.undistort(images[i], mtx, dist, None, mtx))
            axes[int(i / 2), i % 2 * 2 + 1].set_title(
                'Undist. Image\n{}'.format(filenames[i]), fontsize=10)
            axes[int(i / 2), i % 2 * 2 + 1].set_axis_off()
        if (len(images) % 2):
            i += 1
            axes[int(i / 2), i % 2 * 2 + 0].set_axis_off()
            axes[int(i / 2), i % 2 * 2 + 1].set_axis_off()

        plt.show()

    # Hand back the camera matrix 'mtx' and the distortion coefficients 'dist'
    return (mtx, dist)
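# Hypothetical usage of get_cameracalib: calibrate once from a folder of
# 9x6 chessboard photos, then undistort any road image. The directory and
# file names below are assumptions, not part of the original code.
mtx, dist = get_cameracalib('camera_cal')
img = mpimg.imread('test_images/test1.jpg')
undist = cv2.undistort(img, mtx, dist, None, mtx)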
Example #43
# (the head of this call was cut in the excerpt; 'objpoints' and 'imgpoints'
#  stand for the collected 3D object points and 2D image points)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
                                                   imageSize=(1536, 2048),
                                                   cameraMatrix=None,
                                                   distCoeffs=None)
print("Ret:", ret)
print("Mtx:", mtx, " ----------------------------------> [", mtx.shape, "]")
print("Dist:", dist, " ----------> [", dist.shape, "]")
print("rvecs:", rvecs,
      " --------------------------------------------------------> [",
      rvecs[0].shape, "]")
print("tvecs:", tvecs,
      " -------------------------------------------------------> [",
      tvecs[0].shape, "]")

#image rectify
im = cv2.imread('CAM00054_s1.jpg')[..., ::-1]
im_undistorted = cv2.undistort(im, mtx, dist)
plt.subplot(121)
plt.imshow(im)
plt.subplot(122)
plt.imshow(im_undistorted)
plt.show()

#calibration
x, y = np.meshgrid(range(8), range(5))
print("x:\n", x)
print("y:\n", y)

world_points = np.hstack((x.reshape(40, 1), y.reshape(40, 1), np.zeros(
    (40, 1)))).astype(np.float32)
print(world_points)
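# A sketch of how the world_points grid above could be used, assuming an
# image 'gray' of an 8x5 inner-corner chessboard (matching the meshgrid):
found, img_points = cv2.findChessboardCorners(gray, (8, 5), None)
if found:
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        [world_points], [img_points], gray.shape[::-1], None, None)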
Example #44
def process_image(image):
    #1. Camera correction
    #Calibration arrays pre-calculated
    img_undist = cv2.undistort(image, mtx, dist, None, mtx)
    global counter

    #2.Magnitude Threshold
    #Threshold color
    yellow_low = np.array([0, 100, 100])
    yellow_high = np.array([50, 255, 255])
    white_low = np.array([18, 0, 180])
    white_high = np.array([255, 80, 255])
    global ref_left
    global ref_right
    global left_fit
    global right_fit

    imgThres_yellow = hls_color_thresh(img_undist, yellow_low, yellow_high)
    imgThres_white = hls_color_thresh(img_undist, white_low, white_high)
    imgThr_sobelx = sobel_x(img_undist, 9, 80, 220)  #Sobel x

    img_mag_thr = np.zeros_like(imgThres_yellow)
    #imgThresColor[(imgThres_yellow==1) | (imgThres_white==1)] =1
    img_mag_thr[(imgThres_yellow == 1) | (imgThres_white == 1) |
                (imgThr_sobelx == 1)] = 1

    #3. Birds-eye
    #Perspective array pre-calculated
    img_size = (img_mag_thr.shape[1], img_mag_thr.shape[0])
    binary_warped = cv2.warpPerspective(img_mag_thr,
                                        M_persp,
                                        img_size,
                                        flags=cv2.INTER_LINEAR)

    #4. Detect lanes and return fit curves

    if counter == 0:
        left_fit, right_fit, out_imgfit = fitlines(binary_warped)
    else:
        left_fit, right_fit = fit_continuous(left_fit, right_fit,
                                             binary_warped)

    #Project video (2.8, 3.5)
    status_sanity, d0, d1 = sanity_check(left_fit, right_fit, 0, .55)
    #Challenge video (2.4,3.1)
    #status_sanity, d0, d1, d2 =sanity_check(left_fit, right_fit, 2.4,3.1)

    #print(left_fit)
    #print(right_fit)

    #Calc curvature and center
    if status_sanity == True:
        #Save as last reliable fit
        ref_left, ref_right = left_fit, right_fit
        counter += 1
    else:  # Use the last reliable fit
        left_fit, right_fit = ref_left, ref_right

    left_curv, right_curv, center_off = curvature(left_fit, right_fit,
                                                  binary_warped)

    #Warp back to original and merge with image
    img_merge, img_birds = drawLine(img_undist, binary_warped, left_fit,
                                    right_fit)

    #Composition of images to final display
    img_out = np.zeros((576, 1280, 3), dtype=np.uint8)

    img_out[0:576, 0:1024, :] = cv2.resize(img_merge, (1024, 576))
    #b) Threshold
    img_out[0:288, 1024:1280, 0] = cv2.resize(img_mag_thr * 255, (256, 288))
    img_out[0:288, 1024:1280, 1] = cv2.resize(img_mag_thr * 255, (256, 288))
    img_out[0:288, 1024:1280, 2] = cv2.resize(img_mag_thr * 255, (256, 288))
    #c)Birds eye view
    img_out[310:576, 1024:1280, :] = cv2.resize(img_birds, (256, 266))

    #Write curvature and center in image
    TextL = "Left curv: " + str(int(left_curv)) + " m"
    TextR = "Right curv: " + str(int(right_curv)) + " m"
    TextC = "Center offset: " + str(round(center_off, 2)) + "m"
    #TextAux = str(status_sanity) + ", d0: " + str(round(d0,2)) + ", d1: " + str(round(d1,2))
    fontScale = 1
    thickness = 2

    fontFace = cv2.FONT_HERSHEY_SIMPLEX

    cv2.putText(img_out,
                TextL, (130, 40),
                fontFace,
                fontScale, (255, 255, 255),
                thickness,
                lineType=cv2.LINE_AA)
    cv2.putText(img_out,
                TextR, (130, 70),
                fontFace,
                fontScale, (255, 255, 255),
                thickness,
                lineType=cv2.LINE_AA)
    cv2.putText(img_out,
                TextC, (130, 100),
                fontFace,
                fontScale, (255, 255, 255),
                thickness,
                lineType=cv2.LINE_AA)
    #cv2.putText(img_out, TextAux, (100,130), fontFace, fontScale,(255,255,255), thickness,  lineType = cv2.LINE_AA)

    cv2.putText(img_out,
                "Thresh. view", (1070, 30),
                fontFace,
                .8, (200, 200, 0),
                thickness,
                lineType=cv2.LINE_AA)
    cv2.putText(img_out,
                "Birds-eye", (1080, 305),
                fontFace,
                .8, (200, 200, 0),
                thickness,
                lineType=cv2.LINE_AA)

    #return img_out, binary_warped, left_curv, right_curv, left_fit, right_fit
    return img_out
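# A hypothetical driver for the pipeline above using moviepy, assuming
# 'project_video.mp4' exists and the globals that process_image relies on
# (mtx, dist, M_persp, counter, ref_left, ref_right, ...) are initialized:
from moviepy.editor import VideoFileClip

counter = 0
clip = VideoFileClip("project_video.mp4")
annotated = clip.fl_image(process_image)  # maps each RGB frame through the pipeline
annotated.write_videofile("project_video_out.mp4", audio=False)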
Example #45
# Get the calibration data
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
                                                   gray.shape[::-1], None,
                                                   None)

capture = cv2.VideoCapture(0)

c = True
while c != 27:
    status, frame = capture.read()
    h, w = frame.shape[:2]
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 0,
                                                      (w, h))

    #These are the actual values needed to undistort:
    dst = cv2.undistort(frame, mtx, dist, None, newcameramtx)

    # crop the image
    x, y, w, h = roi
    dst = dst[y:y + h, x:x + w]

    cv2.imshow('Original', frame)
    cv2.imshow('Undistorted', dst)

    c = cv2.waitKey(2) & 0xFF

pitch1 = {
    'new_camera_matrix': newcameramtx,
    'roi': roi,
    'camera_matrix': mtx,
    'dist': dist
}
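# A minimal sketch for persisting the calibration dict above so the capture
# loop does not have to be re-run; the file name is an assumption.
import pickle

with open('pitch1_calibration.p', 'wb') as f:
    pickle.dump(pitch1, f)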
Example #46
ax = fig.add_subplot(111, projection='3d')

p.show()

pl = None

while True:
    ret, frame1 = cam1.read()
    if not ret:
        exit(1)

    ret, frame2 = cam2.read()
    if not ret:
        exit(1)

    dst1 = cv2.undistort(frame1, mtx1, dist1, None, newcameramtx1)
    gray1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)

    dst2 = cv2.undistort(frame2, mtx2, dist2, None, newcameramtx2)
    gray2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)

    cv2.imshow('undistorted1', dst1)
    cv2.imshow('undistorted2', dst2)

    ret1, corners1 = cv2.findChessboardCorners(gray1, (9, 6), None)
    ret2, corners2 = cv2.findChessboardCorners(gray2, (9, 6), None)

    point = findBrightnessPoint(frame1)
    frameWithBrightnessPoint = frame1
    cv2.rectangle(frameWithBrightnessPoint, (point[0] - 5, point[1] - 5),
                  (point[0] + 5, point[1] + 5), (255, 0, 0), 1)
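# findBrightnessPoint is not defined in this snippet. A common sketch,
# assuming it should return the (x, y) of the brightest image region:
def findBrightnessPoint(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # blur first so a single noisy hot pixel does not win
    blurred = cv2.GaussianBlur(gray, (41, 41), 0)
    _min_val, _max_val, _min_loc, max_loc = cv2.minMaxLoc(blurred)
    return max_loc  # (x, y)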
Example #47
def move():
    resp = urllib.request.urlopen(
        "http://192.168.0.100/control?var=framesize&val=6")
    h, w = 480, 640
    calib_params = pickle.load(open("camera_calibration.pickle", "rb"))
    mtx = calib_params["mtx"]
    dist = calib_params["dist"]
    filtered_heading_x = 0
    filtered_heading_y = 0
    last_heading = (0, 0)
    P_turn = 0.001 / 320
    I_turn = 0.003 / 1000
    D_turn = 0.0005 / 50
    pid_turn = PID.PID_Control(P_turn, I_turn, D_turn)
    pid_turn.SetPoint = 0
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (640, 480), 1,
                                                      (640, 480))
    tick = 0
    waiting_ticks = 0
    delta_time = 0
    motorSpeed1 = 180
    motorSpeed2 = 150

    create_windows()
    while True:
        resp = urllib.request.urlopen("http://192.168.0.100/capture")
        frame = np.asarray(bytearray(resp.read()), dtype="uint8")
        frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)
        frame = cv2.rotate(frame, cv2.ROTATE_180)
        frame = cv2.undistort(frame, mtx, dist, None, newcameramtx)
        # crop the frame to get rid of distortion
        x, y, w, h = roi
        frame = frame[y:y + h, x:x + w]
        global frame_to_save
        frame_to_save = frame.copy()
        cv2.imshow("Original Undistorted", frame_to_save)

        frame, (heading_x, heading_y) = process_img(frame, last_heading)
        filtered_heading_x = filtered_heading_x * 0.8 + heading_x * 0.2
        filtered_heading_y = filtered_heading_y * 0.8 + heading_y * 0.2
        # print(filtered_heading_x, filtered_heading_y)

        error = int(filtered_heading_x - frame.shape[1] / 2)
        distance = int(frame.shape[0] - filtered_heading_y)

        print("Error:", error, "Dist:", distance, "Heading X: ",
              filtered_heading_x)

        cv2.line(frame, (int(filtered_heading_x), 0),
                 (int(filtered_heading_x), frame.shape[0]),
                 [255, 255, 0], 3)
        cv2.line(frame, (0, int(filtered_heading_y)),
                 (frame.shape[1], int(filtered_heading_y)),
                 [255, 255, 0], 3)

        cv2.imshow("Hough lines with Lanes", frame)
        if tick < 20:
            tick = tick + 1
        else:
            if waiting_ticks == 0:
                error_heading = frame.shape[1] / 2 - filtered_heading_x
                travel_mapping.get_orientation_and_odometry_point()

                if -100 < error_heading < 100:
                    print("go straight")
                    spinMotors(motorSpeed1, motorSpeed1, motorSpeed1,
                               motorSpeed1)
                    time.sleep(1.2)
                    spinMotors(0, 0, 0, 0)
                    delta_time += 1
                    waiting_ticks = 10
                elif -100 < error_heading:
                    pid_turn.update(error_heading, delta_time)
                    print("go left: ", pid_turn.output)
                    delta_time = pid_turn.output

                    spinMotors(-motorSpeed1, -motorSpeed1, motorSpeed1,
                               motorSpeed1)
                    time.sleep(abs(pid_turn.output))
                    spinMotors(0, 0, 0, 0)
                    # we don't want to count rotation wheel movements
                    travel_mapping.reset_odometer()

                    spinMotors(motorSpeed2, motorSpeed2, motorSpeed2,
                               motorSpeed2)
                    time.sleep(0.5)
                    delta_time += 0.5
                    spinMotors(0, 0, 0, 0)

                    waiting_ticks = 10
                else:
                    delta_time = 0
                    pid_turn.update(error_heading, delta_time)
                    print("go right: ", pid_turn.output)
                    delta_time = pid_turn.output
                    delta_time += 0.5

                    spinMotors(motorSpeed1, motorSpeed1, -motorSpeed1,
                               -motorSpeed1)
                    time.sleep(abs(pid_turn.output))
                    spinMotors(0, 0, 0, 0)

                    # we don't want to count rotation wheel movements
                    travel_mapping.reset_odometer()

                    spinMotors(90, 90, 90, 90)
                    time.sleep(0.5)
                    spinMotors(0, 0, 0, 0)
                    waiting_ticks = 10

            else:
                pass

        url = "http://192.168.0.99/sensorRead"
        payload = "sensorType=READ%3BAUX_SENSORS%3B&undefined="
        headers = {
            'Content-Type': "application/x-www-form-urlencoded",
            'cache-control': "no-cache"
        }
        response = requests.request("POST", url, data=payload, headers=headers)
        print(response.text)

        waiting_ticks = waiting_ticks - 1
        if waiting_ticks < 0:
            waiting_ticks = 0

        key = cv2.waitKey(1) & 0xFF

        # if the 'q' key is pressed, stop the loop
        if key == ord("q"):
            break
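# PID.PID_Control is imported from elsewhere. A minimal sketch of a class
# with a compatible interface (SetPoint attribute, update(error, dt) method,
# result exposed as .output), written to match how it is called above:
class PID_Control:
    def __init__(self, P, I, D):
        self.Kp, self.Ki, self.Kd = P, I, D
        self.SetPoint = 0
        self.integral = 0.0
        self.last_error = 0.0
        self.output = 0.0

    def update(self, error, dt):
        self.integral += error * dt
        derivative = (error - self.last_error) / dt if dt > 0 else 0.0
        self.last_error = error
        self.output = (self.Kp * error + self.Ki * self.integral +
                       self.Kd * derivative)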
Example #48
    print("\nRMS:", rms)
    print("camera matrix:\n", camera_matrix)
    print("distortion coefficients: ", dist_coefs.ravel())

    # undistort the image with the calibration
    print('')
    for fn in img_names if debug_dir else []:
        path, name, ext = splitfn(fn)
        img_found = os.path.join(debug_dir, name + '_chess.png')
        outfile = os.path.join(debug_dir, name + '_undistorted.png')

        img = cv2.imread(img_found)
        if img is None:
            continue

        h, w = img.shape[:2]
        newcameramtx, roi = cv2.getOptimalNewCameraMatrix(
            camera_matrix, dist_coefs, (w, h), 1, (w, h))

        dst = cv2.undistort(img, camera_matrix, dist_coefs, None, newcameramtx)

        # crop and save the image
        x, y, w, h = roi
        dst = dst[y:y + h, x:x + w]

        print('Undistorted image written to: %s' % outfile)
        cv2.imwrite(outfile, dst)

    cv2.destroyAllWindows()
def calibrate_camera(samples_dir, sample_images_pattern, x_squares=9, y_squares=6, fVerbose=False,
                     verbose_dir='verbose_output'):
    # calibrate, using sample images matching a sample_images_pattern pattern in samples_dir
    # E.g. calibrate_camera('samples', 'sample*.jpg')
    # x_squares, y_squares used to set the chessboard size
    # fVerbose used to turn on/off verbose output, creation of example images etc

    if fVerbose:
        create_dir(verbose_dir)

    # Prepare object points, (0,0,0), (1,0,0), (2,0,0),..., (x_squares,y_squares,0)
    objp = np.zeros((x_squares * y_squares, 3), np.float32)
    objp[:, :2] = np.mgrid[0:x_squares, 0:y_squares].T.reshape(-1, 2)

    # if fVerbose:
    #    print('Object points, objp: ', objp)  # I want to see what this looks like...

    # Arrays to store the object points and image points from all the images
    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in the image plane

    # Get the list of calibration images
    images = glob.glob(samples_dir + '/' + sample_images_pattern)

    # Step over this list of images, searching for the chessboard corners
    x_size, y_size, x_new, y_new = 0, 0, 0, 0  # will find the width/height of the images and save this

    for idx, fname in enumerate(images):
        img = cv2.imread(fname)  # Note: will be in BGR form
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Hence correct param for conversion to grayscale

        # extract and store the x,y size of the images and warn if not the same each time
        # will need this info later for the actual calibration
        x_new = img.shape[1]
        y_new = img.shape[0]
        if x_size > 0:
            if (x_new != x_size) or (y_new != y_size):
                # should have all of the input sizes the same
                print('Warning, images not the same size! ', idx)
        else:
            # save size of first image
            x_size = x_new
            y_size = y_new

        # Now find the corners
        ret, corners = cv2.findChessboardCorners(gray, (x_squares, y_squares), None)

        if ret == True:
            # Found some corners
            objpoints.append(objp)
            imgpoints.append(corners)  # Append the list of 3d corner points found by cv2

            # At this point, can output some images
            if fVerbose:
                print('Output chessboard corners image #', str(idx))
                cv2.drawChessboardCorners(img, (x_squares, y_squares), corners, ret)
                output_img_name = verbose_dir + '/corners' + str(idx) + '.jpg'
                cv2.imwrite(output_img_name, img)

    # got this far, have the object and image points
    if fVerbose:
        print('Found corners complete...')
        # print('Object points array: ', objpoints)
        # print('Image points array : ', imgpoints)

    # Now want to use these points to calibrate the camera
    img_size = (x_new, y_new)  # values saved earlier in the corners loop

    # Call the calibration function, using the object points and image points collected so far
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)

    # Now have all of the calibration matrices

    # Save the result for later, even though we're returning them from this function also
    cal_pickle = {}
    cal_pickle['mtx'] = mtx
    cal_pickle['dist'] = dist
    pickle.dump(cal_pickle, open('calibrate_pickle.p', 'wb'))

    # If we are going verbose this run, then undistort all of the sample images
    if fVerbose:
        images = glob.glob(samples_dir + '/' + sample_images_pattern)
        for idx, fname in enumerate(images):
            print('Output undistort image #', str(idx))
            img = cv2.imread(fname)
            dst = cv2.undistort(img, mtx, dist, None, mtx)
            output_img_name = verbose_dir + '/undistort' + str(idx) + '.jpg'
            cv2.imwrite(output_img_name, dst)

    return mtx, dist  # return the calibration data
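# Hypothetical reuse of the pickled calibration produced above; the sample
# image path is an assumption:
import pickle

with open('calibrate_pickle.p', 'rb') as f:
    cal = pickle.load(f)
img = cv2.imread('samples/sample1.jpg')
undist = cv2.undistort(img, cal['mtx'], cal['dist'], None, cal['mtx'])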
Example #50
    def undistort(self, img):
        return cv2.undistort(img, self.cam_matrix, self.dist_coeffs)
Example #51
    def imageQ(self, image):
        """Function to check image quality"""
        # Undistort the image
        self.currentImage = cv2.undistort(
            image, self.mtx, self.dist, None, self.mtx).astype(np.float32)

        # Convert to YUV space
        self.yuv = cv2.cvtColor(
            self.currentImage, cv2.COLOR_RGB2YUV).astype(np.float32)

        # Get some statistics for the sky image
        self.currentSkyL = self.yuv[0:self.mid, :, 0]
        self.currentSkyRGB[:, :] = self.currentImage[0:self.mid, :]
        self.skylrgb[0] = np.average(self.currentSkyL[0:self.mid, :])
        self.skylrgb[1] = np.average(self.currentSkyRGB[0:self.mid, :, 0])
        self.skylrgb[2] = np.average(self.currentSkyRGB[0:self.mid, :, 1])
        self.skylrgb[3] = np.average(self.currentSkyRGB[0:self.mid, :, 2])

        # Get some statistics for the road image
        self.currentRoadL = self.yuv[self.mid:self.y, :, 0]
        self.currentRoadRGB[:, :] = self.currentImage[self.mid:self.y, :]
        self.roadlrgb[0] = np.average(self.currentRoadL[0:self.mid, :])
        self.roadlrgb[1] = np.average(self.currentRoadRGB[0:self.mid, :, 0])
        self.roadlrgb[2] = np.average(self.currentRoadRGB[0:self.mid, :, 1])
        self.roadlrgb[3] = np.average(self.currentRoadRGB[0:self.mid, :, 2])

        # Sky image condition
        if self.skylrgb[0] > 160:
            self.skyImageQ = 'Sky Image: overexposed'

        elif self.skylrgb[0] < 50:
            self.skyImageQ = 'Sky Image: underexposed'

        elif self.skylrgb[0] > 143:
            self.skyImageQ = 'Sky Image: normal bright'

        elif self.skylrgb[0] < 113:
            self.skyImageQ = 'Sky Image: normal dark'

        else:
            self.skyImageQ = 'Sky Image: normal'

        # Sky detected weather or lighting conditions
        if self.skylrgb[0] > 128:
            if self.skylrgb[3] > self.skylrgb[0]:
                if self.skylrgb[1] > 120 and self.skylrgb[2] > 120:
                    if(self.skylrgb[2] - self.skylrgb[1]) > 20.0:
                        self.skyText = 'Sky Condition: tree shaded'
                    else:
                        self.skyText = 'Sky Condition: cloudy'
                else:
                    self.skyText = 'Sky Condition: clear'
            else:
                self.skyText = 'Sky Condition: UNKNOWN SKYL > 128'

        else:
            if self.skylrgb[2] > self.skylrgb[3]:
                self.skyText = 'Sky Condition: surrounded by trees'
                self.visibility = -80
            elif self.skylrgb[3] > self.skylrgb[0]:
                if(self.skylrgb[2] - self.skylrgb[1]) > 10.0:
                    self.skyText = 'Sky Condition: tree shaded'
                else:
                    self.skyText = 'Sky Condition: very cloudy or under overpass'
            else:
                self.skyText = 'Sky Condition: UNKNOWN!'

        self.roadbalance = self.roadlrgb[0] / 10.0

        # Road image condition
        if self.roadlrgb[0] > 160:
            self.roadImageQ = 'Road Image: overexposed'
        elif self.roadlrgb[0] < 50:
            self.roadImageQ = 'Road Image: underexposed'
        elif self.roadlrgb[0] > 143:
            self.roadImageQ = 'Road Image: normal bright'
        elif self.roadlrgb[0] < 113:
            self.roadImageQ = 'Road Image: normal dark'
        else:
            self.roadImageQ = 'Road Image: normal'
Example #52
def video_pipeline(image):
    # resize the image to the size that was used for calibration
    # ('scale' is defined in the 2nd cell of this notebook)

    rows, cols, chs = image.shape
    image = cv2.resize(image, (int(cols * scale), int(rows * scale)))
    img_size = (image.shape[1], image.shape[0])

    # Calibrate camera and undistort image
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, img_size, None, None)
    undist = cv2.undistort(image, mtx, dist, None, mtx)

    # Perform perspective transform
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)

    warped = cv2.warpPerspective(undist, M, img_size)
    grad_x = abs_sobel_thresh(warped, orient='x', sobel_kernel=3, thresh=abs_sobel_threshold_value)
    grad_y = abs_sobel_thresh(warped, orient='y', sobel_kernel=3, thresh=abs_sobel_threshold_value)
    # TODO: Not used, need to investigate and use the resulting binary images properly
    # Comment to self: The binary image combination is slowing down the performance
    # Need to optimize the process

    # create binary thresholded images using B and L channel
    b_channel = cv2.cvtColor(warped, cv2.COLOR_RGB2Lab)[:, :, 2]
    l_channel = cv2.cvtColor(warped, cv2.COLOR_RGB2LUV)[:, :, 0]

    # Threshold color channel
    s_channel = cv2.cvtColor(warped, cv2.COLOR_BGR2HLS)[:, :, 2]

    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel > s_thresh_min) & (s_channel <= s_thresh_max)] = 1

    b_binary = np.zeros_like(b_channel)
    b_binary[(b_channel > b_thresh_min) & (b_channel <= b_thresh_max)] = 1

    l_binary = np.zeros_like(l_channel)
    l_binary[(l_channel > l_thresh_min) & (l_channel <= l_thresh_max)] = 1

    combined_binary = np.zeros_like(b_binary)
    combined_binary[(l_binary == 1) | (b_binary == 1) | (s_binary == 1) |
                    ((grad_x == 1) & (grad_y == 1))] = 1

    # combined_binary[(l_binary == 1) | (b_binary == 1) | (s_binary == 1)
    # | ((grad_x == 1) & (grad_y == 1)) | ((magnitude_binary == 1)
    # & (threshold_range_binary == 1))] = 1

    # Get the valid pixels from the binary image
    x, y = np.nonzero(np.transpose(combined_binary))

    # If previous frame exists
    if leftLane.isFound == True:
        leftx, lefty, leftLane.isFound = leftLane.search_lane(x, y)
    if rightLane.isFound == True:
        rightx, righty, rightLane.isFound = rightLane.search_lane(x, y)

    # Otherwise..
    if rightLane.isFound == False:
        rightx, righty, rightLane.isFound = rightLane.heuristic_search(x, y, combined_binary)
    if leftLane.isFound == False:
        leftx, lefty, leftLane.isFound = leftLane.heuristic_search(x, y, combined_binary)

    # Initialize left and right x,y arrays
    lefty = np.array(lefty).astype(np.float32)
    leftx = np.array(leftx).astype(np.float32)
    righty = np.array(righty).astype(np.float32)
    rightx = np.array(rightx).astype(np.float32)

    # Fit polynomial on pixels detected on the left
    left_fit = np.polyfit(lefty, leftx, 2)

    # Get top and bottom intercept
    leftx_cap, left_top = leftLane.get_intercepts(left_fit)

    # Extend the area using intercept and then compute mean
    leftLane.x_cap.append(leftx_cap)
    leftLane.top.append(left_top)
    leftx_cap = np.mean(leftLane.x_cap)
    left_top = np.mean(leftLane.top)
    leftLane.lastx_cap = leftx_cap
    leftLane.last_top = left_top

    # Add the new computed values to current
    leftx = np.append(leftx, leftx_cap)
    lefty = np.append(lefty, 720)
    leftx = np.append(leftx, left_top)
    lefty = np.append(lefty, 0)

    # Sort to reorder the pixel values; without sorting, the lane polygon is drawn in a chaotic order
    leftx, lefty = leftLane.apply_sort(leftx, lefty)

    # Assign values to the class member vars
    leftLane.X = leftx
    leftLane.Y = lefty

    # Recalculate polynomial with intercepts and average across n frames
    left_coeff = np.polyfit(lefty, leftx, 2)

    # Append the coefficients from the above line
    leftLane.coeff0.append(left_coeff[0])
    leftLane.coeff1.append(left_coeff[1])
    leftLane.coeff2.append(left_coeff[2])

    # Compute the mean of the coefficients
    left_coeff_mean = [np.mean(leftLane.coeff0),
                       np.mean(leftLane.coeff1),
                       np.mean(leftLane.coeff2)]

    # Fit a new polynomial on the new values..Finally!
    left_coeff_x = left_coeff_mean[0] * lefty ** 2 + left_coeff_mean[1] * lefty + left_coeff_mean[2]
    leftLane.coeff_x = left_coeff_x

    # Repeat the same for right side..
    # TODO: May be make a new method in the PolyUtils class to do it in one go..this is tedious

    right_fit = np.polyfit(righty, rightx, 2)

    rightx_cap, right_top = rightLane.get_intercepts(right_fit)

    rightLane.x_cap.append(rightx_cap)
    rightx_cap = np.mean(rightLane.x_cap)
    rightLane.top.append(right_top)
    right_top = np.mean(rightLane.top)
    rightLane.lastx_cap = rightx_cap
    rightLane.last_top = right_top
    rightx = np.append(rightx, rightx_cap)
    righty = np.append(righty, 720)
    rightx = np.append(rightx, right_top)
    righty = np.append(righty, 0)

    rightx, righty = rightLane.apply_sort(rightx, righty)
    rightLane.X = rightx
    rightLane.Y = righty

    right_coeff = np.polyfit(righty, rightx, 2)
    rightLane.coeff0.append(right_coeff[0])
    rightLane.coeff1.append(right_coeff[1])
    rightLane.coeff2.append(right_coeff[2])
    right_mean_coeff = [np.mean(rightLane.coeff0), np.mean(rightLane.coeff1), np.mean(rightLane.coeff2)]

    right_coeff_x = right_mean_coeff[0] * righty ** 2 + right_mean_coeff[1] * righty + right_mean_coeff[2]
    rightLane.coeff_x = right_coeff_x

    # Radius of curvature for each lane
    left_curverad = leftLane.find_radius(leftx, lefty)
    right_curverad = rightLane.find_radius(rightx, righty)

    # Assign the value to the member variable of class PolyUtils
    leftLane.radius = left_curverad
    rightLane.radius = right_curverad

    # Calculate the vehicle position relative to the center of the lane
    position = (rightx_cap + leftx_cap) / 2
    distance_from_center = abs((360 - position) * 3.7 / 700)

    Minv = cv2.getPerspectiveTransform(dst, src)

    warp_zeros = np.zeros_like(combined_binary).astype(np.uint8)
    color_warp = np.dstack((warp_zeros, warp_zeros, warp_zeros))

    # Stack the left and right points and fill the polygon
    left_points = np.array([np.flipud(np.transpose(np.vstack([leftLane.coeff_x, leftLane.Y])))])
    right_points = np.array([np.transpose(np.vstack([right_coeff_x, rightLane.Y]))])
    total_points = np.hstack((left_points, right_points))

    cv2.polylines(color_warp, np.int_([total_points]), isClosed=False, color=(0, 0, 255), thickness=40)
    cv2.fillPoly(color_warp, np.int_(total_points), (34, 255, 34))

    # Unwarp using the Inverse matrix and image shape info
    newwarp = cv2.warpPerspective(color_warp, Minv, (image.shape[1], image.shape[0]))
    result = cv2.addWeighted(undist, 1, newwarp, 0.5, 0)

    # Add overlay info on the video
    if position > 360:
        cv2.putText(result, 'The car is {:.2f}m to the left from center'.format(distance_from_center), (50, 80),
                    fontFace=16, fontScale=1, color=(255, 255, 255), thickness=2)
    else:
        cv2.putText(result, 'The car is {:.2f}m to the right from center'.format(distance_from_center), (50, 80),
                    fontFace=16, fontScale=1, color=(255, 255, 255), thickness=2)
    # Print radius of curvature on video
    cv2.putText(result, 'Curvature Radius: {}(m)'.format(int((leftLane.radius + rightLane.radius) / 2)), (120, 140),
                fontFace=16, fontScale=1, color=(255, 255, 255), thickness=2)

    return result
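# Note that video_pipeline re-runs cv2.calibrateCamera on every frame even
# though obj_points/img_points never change. A sketch that calibrates once
# up front and reuses the result (calibration_img_size is assumed to be the
# size the calibration images share):
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
    obj_points, img_points, calibration_img_size, None, None)

def video_pipeline_fast(image):
    # identical to video_pipeline, but uses the precomputed mtx/dist
    undist = cv2.undistort(image, mtx, dist, None, mtx)
    # ... rest of the pipeline unchanged ...
    return undist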
path = os.path.abspath('.')
fname = path + "/calibration_result/calibration_parameters.txt"
print(fname)
with open(fname, "w") as f:
    f.write("{'ret':"+str(ret)+", 'mtx':"+str(list(mtx))+', "dist":'+str(list(dist))+'}')
    f.close()
np.savetxt(imagesFolder+"calib_mtx_webcam.csv", mtx)
np.savetxt(imagesFolder+"calib_dist_webcam.csv", dist)


#imagesFolder = "data_image_detect/"

i = 24  # select image id
plt.figure()
frame = cv2.imread(imagesFolder + "image_{}.jpg".format(i))
img_undist = cv2.undistort(frame, mtx, dist, None)
plt.subplot(211)
plt.imshow(frame)
plt.title("Raw image")
plt.axis("off")
plt.subplot(212)
plt.imshow(img_undist)
plt.title("Corrected image")
plt.axis("off")
plt.show()

## Use of camera calibration to estimate the 3D translation and rotation of each marker in a scene
imagesFolder = "src/ar_markers/src/data_image_detect/"
frame = cv2.imread(imagesFolder + "image_10.png")
plt.figure()
plt.imshow(frame)
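# The excerpt stops before the pose estimation promised above. A sketch
# using the cv2.aruco module from opencv-contrib-python (pre-4.7 API),
# assuming 4x4_50 markers with a 0.05 m side length and the mtx/dist
# computed earlier:
aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_4X4_50)
params = cv2.aruco.DetectorParameters_create()
corners, ids, _ = cv2.aruco.detectMarkers(frame, aruco_dict, parameters=params)
if ids is not None:
    rvecs, tvecs, _ = cv2.aruco.estimatePoseSingleMarkers(corners, 0.05,
                                                          mtx, dist)
    for rvec, tvec in zip(rvecs, tvecs):
        print("rotation:", rvec.ravel(), "translation (m):", tvec.ravel())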
def undistort_image(img, mtx, dist):
    # Undistort img, using the mtx/dist matrices
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    return undist
def undist(img):
    """
    Undistort the image
    """
    return cv2.undistort(img, mtx, dist, None, mtx)
Example #56
    def process_image(self, frame):
        """
        Input frame of the frame from the camera containing the
        sheet markers and the robot markers.
        :param frame: Input image frame
        :return: Image with the markers marked
        """
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # lists of ids and the corners belonging to each marker
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, self.aruco_dict, parameters=self.parameters)
        print(ids)
        if ids is not None:

            try:
                # Detecting 4 corner markers of Sheet
                index_bl = np.where(ids == np.array(self.sheet_bl))[0][0]
                index_tl = np.where(ids == np.array(self.sheet_tl))[0][0]
                index_tr = np.where(ids == np.array(self.sheet_tr))[0][0]
                index_br = np.where(ids == np.array(self.sheet_br))[0][0]

                # Debug Marking!!
                aruco.drawDetectedMarkers(frame, [corners[index_bl]])
                aruco.drawDetectedMarkers(frame, [corners[index_tl]])
                aruco.drawDetectedMarkers(frame, [corners[index_tr]])
                aruco.drawDetectedMarkers(frame, [corners[index_br]])

                cv2.circle(
                    frame,
                    (corners[index_bl][0][0][0], corners[index_bl][0][0][1]),
                    3, (0, 255, 0), -1)
                cv2.circle(
                    frame,
                    (corners[index_tl][0][0][0], corners[index_tl][0][0][1]),
                    3, (0, 255, 0), -1)
                cv2.circle(
                    frame,
                    (corners[index_tr][0][0][0], corners[index_tr][0][0][1]),
                    3, (0, 255, 0), -1)
                cv2.circle(
                    frame,
                    (corners[index_br][0][0][0], corners[index_br][0][0][1]),
                    3, (0, 255, 0), -1)

                # Preprocessing image
                #cv2.undistort(frame,None,None)
                pnts = np.array([
                    (corners[index_bl][0][0][0], corners[index_bl][0][0][1]),
                    (corners[index_tl][0][0][0], corners[index_tl][0][0][1]),
                    (corners[index_tr][0][0][0], corners[index_tr][0][0][1]),
                    (corners[index_br][0][0][0], corners[index_br][0][0][1])
                ])
                warped = four_point_transform(frame, pnts)

                v_corners, v_ids, _ = aruco.detectMarkers(
                    warped, self.aruco_dict, parameters=self.parameters)
                if v_ids is not None:

                    # Detecting Car markers
                    index_vehicle_b = np.where(
                        v_ids == np.array(self.vehicle_id_b))[0][0]
                    index_vehicle_f = np.where(
                        v_ids == np.array(self.vehicle_id_f))[0][0]

                    aruco.drawDetectedMarkers(warped,
                                              [v_corners[index_vehicle_b]])
                    aruco.drawDetectedMarkers(warped,
                                              [v_corners[index_vehicle_f]])

                    # Calculating the Markers centre
                    self.f_marker_centre = get_marker_centre(
                        v_corners[index_vehicle_b][0][0],
                        v_corners[index_vehicle_b][0][1],
                        v_corners[index_vehicle_b][0][2],
                        v_corners[index_vehicle_b][0][3])

                    self.b_marker_centre = get_marker_centre(
                        v_corners[index_vehicle_f][0][0],
                        v_corners[index_vehicle_f][0][1],
                        v_corners[index_vehicle_f][0][2],
                        v_corners[index_vehicle_f][0][3])

                    # Remove lens distortion using the intrinsic matrix
                    # (assumes the distortion coefficients are stored in
                    #  self.dist_coeffs; cv2.undistort requires them)
                    warped = cv2.undistort(warped, self.camera_mat,
                                           self.dist_coeffs)
                    # cv2.projectPoints expects 3D object points (plus rvec,
                    # tvec, camera matrix and distortion coefficients) and
                    # returns projected point coordinates rather than an
                    # image, so it cannot be applied to 'warped' here:
                    # warped = cv2.projectPoints(
                    #     [pnts, self.b_marker_centre, self.f_marker_centre],
                    #     self.r_vec, self.t_vec, self.camera_mat)

                    # Drawing the line from origin to robot markers
                    cv2.line(warped, (0, 0), self.f_marker_centre,
                             (255, 0, 255), 3)
                    cv2.line(warped, (0, 0), self.b_marker_centre,
                             (255, 255, 255), 3)

                    #print(warped.shape)

                    # Finding the conversion factor of pixel
                    # into 'mm' of actual sheet
                    width_ratio = (self.sheet_width / warped.shape[1])
                    height_ratio = (self.sheet_height / warped.shape[0])

                    # Conversion of robot aruco-markers coordinate in pixels
                    # into 'mm' using the calculated ratios
                    self.f_robot_cartisian = (int(width_ratio *
                                                  self.f_marker_centre[0]),
                                              int(height_ratio *
                                                  self.f_marker_centre[1]))

                    self.b_robot_cartisian = (int(width_ratio *
                                                  self.b_marker_centre[0]),
                                              int(height_ratio *
                                                  self.b_marker_centre[1]))
                    #print(self.f_robot_cartisian,self.b_robot_cartisian)
                    return warped

            except Exception:
                # one or more of the expected markers was not detected
                return None

        return None
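# four_point_transform is an external helper. A minimal sketch, assuming the
# four points arrive as (bottom-left, top-left, top-right, bottom-right) in
# the order 'pnts' is built above:
def four_point_transform(image, pts):
    bl, tl, tr, br = np.float32(pts)
    width = int(max(np.linalg.norm(br - bl), np.linalg.norm(tr - tl)))
    height = int(max(np.linalg.norm(tl - bl), np.linalg.norm(tr - br)))
    src = np.float32([tl, tr, br, bl])
    dst = np.float32([[0, 0], [width - 1, 0],
                      [width - 1, height - 1], [0, height - 1]])
    M = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(image, M, (width, height))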
Example #57
    def undistort(self, image):
        image_size = (image.shape[1], image.shape[0])
        # Note: this recalibrates the camera on every call; caching the
        # resulting mtx/dist would be considerably cheaper
        ret, mtx, dist, _, _ = cv2.calibrateCamera(self.obj_points,
                                                   self.img_points, image_size,
                                                   None, None)
        return cv2.undistort(image, mtx, dist, None, mtx)
Example #58
def image_undistort(img, mtx, dist):
    '''
    takes an image and returns an undistorted image
    '''
    return cv2.undistort(img, mtx, dist, None, mtx)
Example #59
def main(prgRun):
    problem = 3
    problem = int(
        input(
            'Which part would you like to run? \nEnter 1 for the nighttime image. \nEnter 2 for the first part of the lane finder. \nEnter 3 for the second part of the lane finder (challenge video): '
        ))

    #Correct image
    if problem == 1:

        video = cv2.VideoCapture('Night Drive - 2689.mp4')

        # Read until video is completed
        while (video.isOpened()):
            # Capture frame-by-frame
            ret, frame = video.read()
            if ret == True:
                frame = imutils.resize(frame, width=320, height=180)
                ogframe = frame
                clnframe = frame
                resetframe = frame
                cv2.imshow('Original Frame', frame)

            ##########################Correct frame###########################
            gamma = 5
            sat = 5
            hue = 1
            contrast = 60

            frame = adjust_gamma(frame, gamma)
            frame = adjustSaturation(frame, hue, sat)
            frame = adjustContrast(frame, contrast)
            frame = cv2.bilateralFilter(frame, 15, 75, 75)
            frame = cv2.GaussianBlur(frame, (5, 5), 0)

            ###################Output Imagery##########################
            cv2.imshow('DWF', frame)
            # Press Q on keyboard to  exit
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break

    #Lane finder Image set
    elif problem == 2:
        directory = './data'
        directory = str(
            input(
                'What is the name and directory of the folder with the images? Note: this should be entered as "./folder_name": \n'
            ))

        print("Getting images from " + str(directory))
        imageList = imagefiles(directory)  # get a stack of images
        homo = HomoCalculation.homo_problem2()
        homo_inv = perspective.invHomo(homo)
        """process each image individually"""
        for i in range(len(imageList)):
            frameDir = directory + '/' + imageList[i]
            frame = cv2.imread(frameDir)
            # frame = imutils.resize(frame, width=320, height=180)

            ##########################Correct frame###########################
            K, D = cameraParamsPt2()
            frame = cv2.undistort(frame, K, D, None, K)
            ##########################Thresh frame###########################
            grayframe = grayscale(frame)
            frame = cv2.GaussianBlur(frame, (5, 5), 0)
            binaryframe = yellowAndWhite(frame)
            ############################Region ################
            region = process_image(binaryframe)
            #####################Homography and dewarp########################
            """the next line give you a flat view of current frame"""
            flatfieldBinary = cv2.warpPerspective(
                region, homo, (frame.shape[0], frame.shape[1]))
            flatBGR = cv2.warpPerspective(frame, homo,
                                          (frame.shape[0], frame.shape[1]))

            ####################Contour#######################################
            cnts, hierarchy = cv2.findContours(flatfieldBinary, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)

            bincntframe = cv2.drawContours(flatfieldBinary, cnts, -5, 255, 5)
            # cv2.imshow('flatfield binary', bincntframe)

            # cntframe = cv2.drawContours(flatBGR, cnts,-5, (255, 0, 0), 5)

            ###################Draw Lines##########################################
            LanesDrawn, LeftLines, RightLines, Turning = MarkLanes(
                bincntframe, flatBGR, frame)
            # cv2.imshow('Flafield Lanes', LanesDrawn)

            leftLane_warped = perspective.perspectiveTransfer_coord(
                LeftLines, homo_inv)[250:1200]
            rightLane_warped = perspective.perspectiveTransfer_coord(
                RightLines, homo_inv)[250:1200]

            Turning = CheckTurn(rightLane_warped, leftLane_warped,
                                frame)[300:1000]

            MidLane = cv2.polylines(frame, [Turning], False, (0, 255, 0), 5)
            frame_lane = cv2.polylines(MidLane, [leftLane_warped], False,
                                       (0, 0, 255), 5)
            frame_lane = cv2.polylines(frame_lane, [rightLane_warped], False,
                                       (255, 0, 0), 5)

            ###################Output Imagery##########################
            # frame_lane = imutils.resize(frame_lane, width=320, height=180)
            # LanesDrawn = imutils.resize(LanesDrawn, width=320, height=180)

            cv2.imshow('Working Frame', LanesDrawn)
            cv2.imshow("Imposed Lane Lines", frame_lane)

            if cv2.waitKey(25) & 0xFF == ord('q'):
                break

            # leftover debug hook: 'flag' and 'img_unwarped' are never defined
            # in this excerpt, so the block below stays disabled
            # if flag:
            #     cv2.imshow('unwarped video', img_unwarped)
            #     if cv2.waitKey(25) & 0xFF == ord('q'):
            #         break

    #Lane Finder Challenge vid
    elif problem == 3:

        video = cv2.VideoCapture('challenge_video.mp4')
        homo = HomoCalculation.homo_problem3()
        homo_inv = perspective.invHomo(homo)
        # Read until video is completed
        while (video.isOpened()):
            # Capture frame-by-frame
            ret, frame = video.read()
            if ret == True:
                # frame = imutils.resize(frame, width=320, height=180)
                # frame = frame[int(frame.shape[0] / 2) + 70:, :]
                ogframe = frame
                clnframe = frame
                resetframe = frame

                ##########################Correct frame###########################
                K, D = cameraParamsPt3()
                frame = cv2.undistort(frame, K, D, None, K)

                ##########################Thresh frame###########################
                grayframe = grayscale(frame)
                frame = cv2.GaussianBlur(frame, (5, 5), 0)
                binaryframe = yellowAndWhite(frame)
                ############################Region ################
                region = process_image(binaryframe)
                #####################Homography and dewarp########################

                #check
                """the next line give you a flat view of current frame"""
                flatfieldBinary = cv2.warpPerspective(
                    region, homo, (frame.shape[0], frame.shape[1]))
                flatBGR = cv2.warpPerspective(frame, homo,
                                              (frame.shape[0], frame.shape[1]))

                ####################Contour#######################################
                cnts, hierarchy = cv2.findContours(flatfieldBinary,
                                                   cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)

                bincntframe = cv2.drawContours(flatfieldBinary, cnts, -5,
                                               (255), 5)
                # cv2.imshow('flatfield binary', bincntframe)

                # cntframe = cv2.drawContours(flatBGR, cnts,-5, (255, 0, 0), 5)

                ###################Draw Lines##########################################

                LanesDrawn, LeftLines, RightLines, null = MarkLanes(
                    bincntframe, flatBGR, frame)

                leftLane_warped = perspective.perspectiveTransfer_coord(
                    LeftLines, homo_inv)[100:1100]
                rightLane_warped = perspective.perspectiveTransfer_coord(
                    RightLines, homo_inv)[100:1100]

                Turning = CheckTurn(rightLane_warped, leftLane_warped,
                                    frame)[500:1000]

                MidLane = cv2.polylines(frame, [Turning], False, (0, 255, 0),
                                        5)
                frame_lane = cv2.polylines(MidLane, [leftLane_warped], False,
                                           (0, 0, 255), 5)
                frame_lane = cv2.polylines(frame_lane, [rightLane_warped],
                                           False, (255, 0, 0), 5)

                ###################Output Imagery##########################
                LanesDrawn = imutils.resize(LanesDrawn, width=320, height=180)
                cv2.imshow('Working Frame', LanesDrawn)

                frame_lane = imutils.resize(frame_lane, width=320, height=180)
                cv2.imshow('Final Frame', frame_lane)

                # Press Q on keyboard to  exit
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    break
    cv2.destroyAllWindows()
    prgRun = False
    return prgRun
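# adjust_gamma (used in problem 1 above) is an external helper; a common
# LUT-based sketch:
def adjust_gamma(image, gamma=1.0):
    inv = 1.0 / gamma
    table = np.array([((i / 255.0) ** inv) * 255
                      for i in range(256)]).astype("uint8")
    return cv2.LUT(image, table)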
def process_image(img, plot=False):

    global mtx, dist, src, dst

    # undist_img = undistort(img, mtx, dist)
    undist_img = cv2.undistort(img, mtx, dist, None, mtx)
    warped_binary = luv_lab_filter(undist_img, l_thresh=(210, 255),
                                   b_thresh=(143, 200))
    nonzerox, nonzeroy = np.nonzero(np.transpose(warped_binary))

    if left.detected is True:
        leftx, lefty, left.detected = left.quick_search(nonzerox, nonzeroy)
    if right.detected is True:
        rightx, righty, right.detected = right.quick_search(nonzerox, nonzeroy)
    if left.detected is False:
        leftx, lefty, left.detected, out_img_left = left.blind_search(nonzerox, nonzeroy, warped_binary)
    if right.detected is False:
        rightx, righty, right.detected, out_img_right = right.blind_search(nonzerox, nonzeroy, warped_binary)

    left.y = np.array(lefty).astype(np.float32)
    left.x = np.array(leftx).astype(np.float32)
    right.y = np.array(righty).astype(np.float32)
    right.x = np.array(rightx).astype(np.float32)

    left_fit, left_fitx, left_fity = left.get_fit()
    right_fit, right_fitx, right_fity = right.get_fit()

    left_curv = left.get_curv()
    right_curv = right.get_curv()
    mean_curv = np.mean([left_curv, right_curv])
    offset = car_pos(left_fit, right_fit)

    result = draw_area(undist_img, left_fitx, left_fity, right_fitx, right_fity)

    font = cv2.FONT_HERSHEY_SIMPLEX  # use the default font
    text1 = 'Radius of Curvature: %d(m)'
    text2 = 'Offset from center: %.2f(m)'
    text3 = 'Radius of Curvature: Inf (m)'

    if mean_curv < 3000:
        cv2.putText(result, text1 % (int(mean_curv)),
                                  (60, 100), font, 1.0, (255, 255, 255), thickness=2)
    else:
        cv2.putText(result, text3,
                    (60, 100), font, 1.0, (255, 255, 255), thickness=2)
    cv2.putText(result, text2 % (-offset),
                              (60, 130), font, 1.0, (255, 255, 255), thickness=2)

    if plot is True:
        warped = warp(img)

        l = cv2.cvtColor(warped, cv2.COLOR_RGB2LUV)[:, :, 0]
        # l_bin = np.zeros_like(l)
        # l_bin[(l >= l_thresh[0]) & (l <= l_thresh[1])] = 1
        b = cv2.cvtColor(warped, cv2.COLOR_RGB2Lab)[:, :, 2]
        # b_bin = np.zeros_like(b)
        # b_bin[(b >= b_thresh[0]) & (b <= b_thresh[1])] = 1
        # combine = np.zeros_like(l)
        # combine[(l_bin == 1) | (b_bin == 1)] = 1
        #
        out_combine = cv2.addWeighted(out_img_left, 1, out_img_right, 0.5, 0)
        plt.figure(figsize=(12, 8))
        plt.subplot(231)
        plt.imshow(undist_img)
        plt.title('Undistort Img')
        plt.subplot(232)
        plt.imshow(warped)
        plt.plot(left_fitx, left_fity, color='green')
        plt.plot(right_fitx, right_fity, color='green')
        plt.xlim(0, 1280)
        plt.ylim(720, 0)
        plt.subplot(233)
        plt.imshow(result)
        plt.subplot(234)
        plt.imshow(b, cmap='gray')
        plt.title('B-channel')
        plt.subplot(235)
        plt.imshow(l, cmap='gray')
        plt.title('L-channel')
        plt.subplot(236)
        plt.imshow(out_combine, cmap='gray')
        plt.show()

    return result