Example #1
def DetectCorners(frame, keypoints=[], kernel_size=5, kernel_step_size=1, k=0.04, thresh=1000):
	'''
	 @brief Detect corners using the Harris corner method.
	 	Inspired by: https://github.com/hughesj919/HarrisCorner/blob/master/Corners.py

	 @param frame (grayscale)
	 @param keypoints (list of cv2 keypoints. The frame will be reset if any keypoints are given. (default=empty list))
	 @param kernel_size (It is the size of neighbourhood considered for corner detection (default=5))
	 @param kernel_step_size (default=1)
	 @param k (Harris corner constant - usually between 0.04 - 0.06 (default=0.04))
	 @param thresh (Response threshold (default=1000))

	 @return corners (list of corners given as cv2 keypoints. Get position of keypoint as kp.pt (x,y position) and response of corner as kp.response)
	'''
	corners 			= []
	offset 				= int(kernel_size//2)
	kernel_step_size 	= int(round(kernel_step_size))

	len_kp = len(keypoints)
	if len_kp > 0: # Find corners from keypoints.
		corners_frame = np.zeros(GetShape(frame), dtype=np.uint8)
		for i in range(len_kp):
			corners_frame[int(round(keypoints[i].pt[1])), int(round(keypoints[i].pt[0]))] = keypoints[i].size
	else: # Find corners in given frame.
		corners_frame = frame

	#Find x and y derivatives
	dy, dx = np.gradient(corners_frame)
	Ixx = dx**2
	Ixy = dy*dx
	Iyy = dy**2
	height, width = GetShape(corners_frame)

	for y in range(offset, height-offset, kernel_step_size):
		for x in range(offset, width-offset, kernel_step_size):
			#Calculate sum of squares
			windowIxx = Ixx[y-offset:y+offset+1, x-offset:x+offset+1]
			windowIxy = Ixy[y-offset:y+offset+1, x-offset:x+offset+1]
			windowIyy = Iyy[y-offset:y+offset+1, x-offset:x+offset+1]
			Sxx = windowIxx.sum()
			Sxy = windowIxy.sum()
			Syy = windowIyy.sum()

			#Find determinant and trace, use to get corner response
			det = (Sxx * Syy) - (Sxy**2)
			trace = Sxx + Syy
			r = det - k*(trace**2)

			#If corner response is over threshold, add to corner list
			if r > thresh:
				corners.append(cv2.KeyPoint(x=x, y=y, _size=r/thresh, _response=r))
	return corners
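A minimal usage sketch (hedged: it assumes numpy/cv2 imports and the GetShape helper used throughout these examples, plus an OpenCV 3-era cv2.KeyPoint that accepts the _size/_response keywords):

# Hypothetical usage of DetectCorners on a synthetic frame.
import numpy as np

test_frame = np.zeros((64, 64), dtype=np.uint8)
test_frame[16:48, 16:48] = 255  # A bright square produces four strong corner responses.
corners = DetectCorners(test_frame, kernel_size=5, k=0.04, thresh=1000)
for kp in corners:
	print('corner at {0}, response {1}'.format(kp.pt, kp.response))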
Example #2
	def GetContentRequestPointList(self, content, error=False):
		'''
		 @brief Get unpacked/packed content for point list request.

		 @param content
		 @param error (True/False)

		 if master:
		 	@return und_shape, keypoints, descriptors, valid, error (und_shape = undistorted frame shape, keypoints = point keys data, descriptors = point description data, valid = True/False, error = True/False)
		 else: (slave)
		 	@return content
		'''
		keypoints 	= None
		descriptors = None
		und_shape 	= None
		if self.__master_or_slave: # True = master 
			valid = content['valid']
			error = content['error']
			if valid:
				und_shape 	= content['und_shape']
				keypoints 	= content['keypoints']
				descriptors = content['descriptors']
				keypoints 	= list_to_keypoints(keypoints)
				descriptors = np.array(descriptors)
			return und_shape, keypoints, descriptors, valid, error
		else:
			valid = False
			if not(isinstance(content, type(None))):
				valid = True
				original_frame, original_sl_frame, frame_un, delta_frame, keypoints, descriptors = content
				und_shape 	= GetShape(frame_un)
				keypoints 	= keypoints_to_list(keypoints)
				descriptors = descriptors.tolist()
			content = {'und_shape': und_shape, 'keypoints': keypoints, 'descriptors': descriptors, 'valid': valid, 'error': error}
			return content
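The helpers keypoints_to_list/list_to_keypoints used above are not shown in this example; a minimal sketch of what they might look like (assumption: the OpenCV 3-era positional cv2.KeyPoint constructor):

import cv2

def keypoints_to_list(keypoints):
	# Flatten cv2.KeyPoint objects into plain lists so they can be packed and sent.
	return [[kp.pt[0], kp.pt[1], kp.size, kp.angle, kp.response, kp.octave, kp.class_id]
			for kp in keypoints]

def list_to_keypoints(kp_list):
	# Rebuild cv2.KeyPoint objects from the flattened representation.
	return [cv2.KeyPoint(p[0], p[1], p[2], p[3], p[4], int(p[5]), int(p[6]))
			for p in kp_list]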
Example #3
	def test_PyQtGraphImageMultipleImages(self):
		'''
		 @brief Test PyQtImage real-time plot with multiple images
		'''
		from src.DroneVision.DroneVision_src.hardware.imageTools import GetImage
		from src.DroneVision.DroneVision_src.imgProcessing.frameTools.frameTools import CheckColor, GetShape
		import time, cv2, timeit
		from getpass import getpass

		for folder, left_frames, right_frames, actual_distances, baselines, use_set in self.GetFrameSets():
			if use_set:
				self.pqImage = self.PyQtImage.PyQtImage(initialize=True, title='Test real-time multiple plot')
				timer = timeit.default_timer()
				reset = True
				for i in range(len(left_frames)):
					frame_l 	= GetImage(folder+left_frames[i][0])
					frame_l_sl 	= GetImage(folder+left_frames[i][1])
					frame_r 	= GetImage(folder+right_frames[i][0])
					frame_r_sl 	= GetImage(folder+right_frames[i][1])
					frame_l 		= CheckColor(frame_l)
					width, height 	= GetShape(frame_l)
					cv2.line(frame_l, (0,0), (height,width), (255,255,0), 10)
					touple_frames = [('frame_l', frame_l), ('frame_l_sl', frame_l_sl), ('frame_r', frame_r), ('frame_r_sl', frame_r_sl)]
					self.pqImage(touple_frames, rotate=True)
					if not(self.CheckAllTests()):
						getpass('Press enter to continue to next frame..')
					if reset:
						self.pqImage(reset=True)
						reset = False
					else:
						reset = True
				print 'Delay for processing {0} image sets was: {1} sec'.format(len(left_frames), timeit.default_timer()-timer)
				self.pqImage.ClosePQWindow()
Example #4
def DetectCornersCV2(frame, keypoints=[], kernel_size=5, k_sobel_size=3, k=0.04, thresh=0.8):
	'''
	 @brief Detect corners using the Harris corner detector as provided by the opencv library.

	 @param frame (grayscale)
	 @param keypoints (list of cv2 keypoints. The frame will be reset if any keypoints are given. (default=empty list))
	 @param kernel_size (It is the size of neighbourhood considered for corner detection (default=5))
	 @param k_sobel_size (Aperture parameter of Sobel derivative used. (default=3))
	 @param k (Harris corner constant - usually between 0.04 - 0.06 (default=0.04))
	 @param thresh (Threshold value relative to the maximum corner response. (default=0.8))

	 @return corners (list of corners given as cv2 keypoints. Get position of keypoint as kp.pt (x,y position) and response of corner as kp.response)
	'''
	len_kp = len(keypoints)
	if len_kp > 0: # Find corners from keypoints.
		corners_frame = np.zeros(GetShape(frame), dtype=np.float32)
		for i in range(len_kp):
			corners_frame[int(round(keypoints[i].pt[1])), int(round(keypoints[i].pt[0]))] = keypoints[i].size
	else: # Find corners in given frame.
		corners_frame = np.float32(frame)

	corners_frame = cv2.cornerHarris(corners_frame, kernel_size, k_sobel_size, k)

	#result is eroded for marking the corners
	corners_frame = cv2.erode(corners_frame, np.ones((3,3), dtype=np.uint8))

	# Threshold for an optimal value, it may vary depending on the image.
	kp_positions 	= np.flip(np.argwhere(corners_frame > thresh*corners_frame.max()), 1)
	n_kps 			= len(kp_positions)
	corners 		= [cv2.KeyPoint() for _ in range(n_kps)] # Separate instances - repeating one KeyPoint in a list would alias the same object.
	for i in range(n_kps):
		corners[i].pt 		= tuple(kp_positions[i])
		corners[i].size 	= corners_frame[kp_positions[i,1], kp_positions[i,0]]
		corners[i].response = corners_frame[kp_positions[i,1], kp_positions[i,0]]
	return corners
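Note that thresh here is relative to the maximum corner response, unlike the absolute threshold in DetectCorners above. A short usage sketch (same assumptions as before: np, cv2 and GetShape available):

import numpy as np

test_frame = np.zeros((64, 64), dtype=np.uint8)
test_frame[16:48, 16:48] = 255
corners = DetectCornersCV2(test_frame, kernel_size=5, k_sobel_size=3, k=0.04, thresh=0.8)
print('{0} corners detected'.format(len(corners)))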
Example #5
def PrintHoughAccumulator(accumulator, id_m, rhos):
    '''
	 @brief Print Hough transform accumulator in matplotlib figure.

	 @param accumulator
	 @param id_m (indices of the accumulator peaks to mark)
	 @param rhos (rho values corresponding to the accumulator rows)
	'''
    acc_shape = GetShape(accumulator)
    ylabs = np.deg2rad(np.arange(-0.0, 180.0, 1))
    acc_map = np.zeros((acc_shape[0], len(ylabs)), dtype=np.uint16)
    id_acc = np.argwhere(accumulator > 0)
    for i in range(len(id_acc)):

        idx = id_acc[i][0] * acc_shape[1] + id_acc[i][1]
        rho = rhos[idx // acc_shape[1]]
        acc_map[int(rho), idx % acc_shape[1]] += 1

    acc_map = cv2.cvtColor(acc_map, cv2.COLOR_GRAY2BGR)

    for i in range(len(id_m)):
        idx = id_m[i][0] * acc_shape[1] + id_m[i][1]
        rho = rhos[idx // acc_shape[1]]
        cv2.circle(acc_map, (int(rho), idx % acc_shape[1]), 1, (0, 0, 255), -1)

    acc_map = CheckGrayScale(acc_map)

    touple_frame_plot = []
    touple_frame_plot.append(('Hough line accumulator', acc_map.T))
    MatplotShow(touple_frame_plot, 'Hough_line_accumulator')
Example #6
    def NormalizeEdgeHeading(self, frame, rho, theta):
        '''
		 @brief Normalize the edge heading (rho, theta) according to image center.
		 	See theory about the hough transform.
		 	Short: rho is originally considered from top left image corner, 
		 		we want it from image center.
		 		theta ranges from 0 -> pi, with rho positive or negative;
		 		we want theta to range from 0 -> 2pi and rho positive.
		 		From image center:
		 			if rho < 0:
		 				theta += pi 
		 			rho = abs(rho), where rho is distance to line from image center.

		 @param frame
		 @param rho
		 @param theta

		 @return rho, theta (normalized)
		'''
        if rho < 0:
            theta += np.pi
            rho *= -1.0
        width, height = GetShape(frame)
        x0, y0 = height / 2.0, width / 2.0
        x1, y1, x2, y2 = self.GetHoughLineSegment(frame, rho, theta)
        theta = self.GetAngleOfLineBetweenTwoPoints(x1, y1, x2, y2, x0, y0)
        rho = self.GetDistanceToLine(x1, y1, x2, y2, x0, y0)
        return rho, theta
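The sign convention described in the docstring can be checked in isolation; a tiny standalone sketch of just the (rho, theta) flip (the class helpers GetHoughLineSegment etc. are assumed and not reproduced here):

import numpy as np

def normalize_rho_theta(rho, theta):
    # Map (rho < 0, theta in [0, pi)) to (rho >= 0, theta in [0, 2*pi)).
    if rho < 0:
        theta += np.pi
        rho *= -1.0
    return rho, theta

print(normalize_rho_theta(-10.0, 0.5))  # -> (10.0, 0.5 + pi)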
Example #7
    def DrawErrorArrow(self,
                       frame,
                       heading,
                       draw_min_rho=True,
                       rho_min_diag_perc=1 / 4.0,
                       color=(255, 0, 0),
                       min_rho_color=(153, 0, 0),
                       line_thick=5,
                       draw_arrow=True):
        '''
		 @brief Draw error line/arrow from point on line to image edge.

		 @param frame 
		 @param heading (rho, theta, max/min, hor/vert)
		 @param draw_min_rho (True/False)
		 @param rho_min_diag_perc (default=1/4.0)
		 @param color (default=(255,0,0) (RED))
		 @param min_rho_color (default=(153,0,0) (DARK RED))
		 @param line_thick (default=5)
		 @param draw_arrow (draw line as arrow (default=True))

		 @return frame (color)
		'''
        rho, theta, max_line, hor_or_vert = heading[:4]
        if not (rho == None or theta == None):
            frame = CheckColor(frame)
            I_l, I_w = GetShape(frame)
            x1 = int(round(I_w / 2.0 + np.cos(theta) * rho))
            y1 = int(round(I_l / 2.0 + np.sin(theta) * rho))

            flip_rot = 0.0
            if not (self.VerifyEdgeQuadrant(heading)):
                flip_rot = np.pi  # Edge is in the wrong quadrant, so find rho_f in the correct direction.
            I_cos = I_w * np.cos(theta + flip_rot)
            I_sin = I_l * np.sin(theta + flip_rot)
            diag_rot_l = 0.5 * np.sqrt(I_cos * I_cos + I_sin * I_sin)
            x2 = int(round(I_w / 2.0 + np.cos(theta + flip_rot) * diag_rot_l))
            y2 = int(round(I_l / 2.0 + np.sin(theta + flip_rot) * diag_rot_l))
            x_rho_min = int(
                round(I_w / 2.0 + np.cos(theta + flip_rot) * diag_rot_l *
                      (1.0 - rho_min_diag_perc)))
            y_rho_min = int(
                round(I_l / 2.0 + np.sin(theta + flip_rot) * diag_rot_l *
                      (1.0 - rho_min_diag_perc)))
            if draw_arrow:
                cv2.arrowedLine(frame, (x1, y1), (x2, y2),
                                color,
                                line_thick,
                                tipLength=0.1)
                if draw_min_rho:
                    cv2.arrowedLine(frame, (x1, y1), (x_rho_min, y_rho_min),
                                    min_rho_color,
                                    line_thick,
                                    tipLength=0.1)
            else:
                cv2.line(frame, (x1, y1), (x2, y2), color, line_thick)
                if draw_min_rho:
                    cv2.line(frame, (x1, y1), (x_rho_min, y_rho_min),
                             min_rho_color, line_thick)
        return frame
Example #8
def DrawHoughLine(frame, hough_line, color):
    '''
	 @brief Draw hough line on frame.
	 	Hough lines are given in rho, theta coordinates.
	
	 @param frame
	 @param hough_line (rho, theta)
	 @param color RGB color (R,G,B)

	 @return frame (with drawn line)
	'''
    frame = CheckColor(frame)
    width, height = GetShape(frame)
    size = math.sqrt(math.pow(width, 2.0) + math.pow(height, 2.0))
    rho, theta = hough_line
    if not (np.isnan(theta)) and not (np.isnan(rho)):
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        x1 = int(x0 + size * (-b))
        y1 = int(y0 + size * (a))
        x2 = int(x0 - size * (-b))
        y2 = int(y0 - size * (a))
        cv2.line(frame, (x1, y1), (x2, y2), color, 2)
    return frame
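A hedged usage sketch (assumptions: np, cv2, math, CheckColor and GetShape are available as in this module):

import numpy as np

blank = np.zeros((100, 200), dtype=np.uint8)
# Draw the line rho=50 px, theta=45 degrees in green; the frame is converted to color internally.
line_frame = DrawHoughLine(blank, (50.0, np.pi / 4.0), (0, 255, 0))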
Example #9
    def UpdatePQImage(self,
                      frame,
                      key_tag,
                      process_events=True,
                      rotate=True,
                      autoDownsample=True):
        '''
		 @brief Plot image in real-time

		 @param frame
		 @param key_tag (also frame title)
		 @param process_events (True/False for processing new events (plotting new data))
		 @param rotate (True/False for rotating the image so that it is shown properly. (default=True))
		 @param autoDownsample (True/False (default=True))
		'''
        if not (self.__win_open):
            self.InitPQImage(title=self.__title)
        self.AssertPQImageInititalized()
        if not (key_tag in self.__img_items):
            self.AddPQImagePlot(key_tag)
        if rotate:
            width, height = GetShape(frame)
            M = cv2.getRotationMatrix2D((height / 2, width / 2), -90, 1)
            self.__img_items[key_tag].setImage(cv2.warpAffine(
                frame, M, (height, width)),
                                               autoDownsample=autoDownsample)
        else:
            self.__img_items[key_tag].setImage(frame,
                                               autoDownsample=autoDownsample)
        if process_events:
            self.TriggProcessEvents()
            if self.CheckFlagReset():
                self.UpdatePQImage(frame, key_tag, process_events, rotate,
                                   autoDownsample)
Example #10
    def Undistort(self, frame):
        '''
		 @brief Undistort frame using the remapping method.
		 	Should give the same result as direct undistortion (cv2.undistort()).

		 @param frame

		 @return Undistorted frame
		'''
        self.AssertCameraCalibrated()
        if not (self.CheckIntrinsicScale(GetShape(frame))):
            self.RectifyCamera(GetShape(frame))
            self.InitUndistortRectifyMap()
        und_frame = cv2.remap(frame, self.__mapx, self.__mapy,
                              cv2.INTER_LINEAR)
        return self.CropUndistortedFrame(und_frame)
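The mapx/mapy members are assumed to come from InitUndistortRectifyMap(); with plain OpenCV, that step might look roughly like the sketch below (camera_mtx and dist_coeffs are hypothetical stand-ins for the stored calibration parameters):

import cv2

def build_undistort_maps(camera_mtx, dist_coeffs, frame_size):
    # frame_size given as (height, width); OpenCV expects (width, height).
    h, w = frame_size
    new_mtx, roi = cv2.getOptimalNewCameraMatrix(camera_mtx, dist_coeffs, (w, h), 1, (w, h))
    mapx, mapy = cv2.initUndistortRectifyMap(camera_mtx, dist_coeffs, None, new_mtx, (w, h), cv2.CV_32FC1)
    return mapx, mapy, roi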
Example #11
    def CalibrateCamera(self, frame_size=None):
        '''
		 @brief Compute the camera matrix, distortion coefficients, rotation and translation vectors.

		 @param frame_size (height, width)
		'''
        if not (isinstance(frame_size, tuple)) and not (isinstance(
                frame_size, list)):
            frame_size = GetShape(GetImage(self.__calib_img_fnames[0]))
        if self.__focal_length != None and self.__sensor_size != None:
            #flags 	= cv2.CALIB_USE_INTRINSIC_GUESS | cv2.CALIB_FIX_ASPECT_RATIO
            flags = cv2.CALIB_USE_INTRINSIC_GUESS | cv2.CALIB_FIX_PRINCIPAL_POINT
            self.__calib_params['intrinsic_mtx'] = self.ComputeCameraMatrix(
                frame_size, self.__focal_length, self.__sensor_size)
            retval, self.__calib_params['intrinsic_mtx'], self.__calib_params[
                'distortion_coeffs'], rvecs, tvecs = cv2.calibrateCamera(
                    self.__calib_params['objpoints'],
                    self.__calib_params['imgpoints'],
                    (frame_size[1], frame_size[0]),
                    self.__calib_params['intrinsic_mtx'],
                    None,
                    flags=flags)
        else:
            retval, self.__calib_params['intrinsic_mtx'], self.__calib_params[
                'distortion_coeffs'], rvecs, tvecs = cv2.calibrateCamera(
                    self.__calib_params['objpoints'],
                    self.__calib_params['imgpoints'],
                    (frame_size[1], frame_size[0]), None, None)
        if not (retval):
            raise Exception('Camera calibration failed!')
Example #12
    def FindChessBoardCorners(self):
        '''
		 @brief Find chessboard matches and append to imgpoints.
		'''
        if self.__show_chessboard_img:
            realTimePlot = self.GetNewRealTimePlot()

        ret_calib = False
        for i in range(len(self.__calib_img_fnames)):
            fname = self.__calib_img_fnames[i]
            img = GetImage(fname, gray=False)
            gray = CheckGrayScale(img)
            gray_shape = GetShape(gray)
            if i > 0:
                if not (self.CheckIntrinsicScale(gray_shape)):
                    raise ValueError(
                        'Calibration image dimensions do not match!')
            else:
                self.SetScale(gray_shape)

            # Find the chess board corners
            flags = cv2.CALIB_CB_ADAPTIVE_THRESH | cv2.CALIB_CB_NORMALIZE_IMAGE | cv2.CALIB_CB_FAST_CHECK
            ret, corners = cv2.findChessboardCorners(
                gray, (self.__calib_chess_rows, self.__calib_chess_columns),
                flags=flags)

            # If found, add object points, image points (after refining them)
            print_msg = '{0}/{1} images processed'.format(
                i + 1, len(self.__calib_img_fnames))
            if ret:
                self.PrintCalibrationProcess(
                    print_msg + ' - {0} was successful'.format(fname))
                ret_calib = True
                self.__calib_params['objpoints'].append(self.__objp)

                corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                                           self.__criteria)
                self.__calib_params['imgpoints'].append(corners)

                if self.__show_chessboard_img:
                    cv2.drawChessboardCorners(
                        img,
                        (self.__calib_chess_rows, self.__calib_chess_columns),
                        corners, ret)
                    realTimePlot(reset=True)
                    realTimePlot([(fname[len(self.__calib_folder):], img)],
                                 'calibration_frames')
            else:
                self.PrintCalibrationProcess(
                    print_msg + ' - {0} was not successful'.format(fname))
                if self.__show_chessboard_img:
                    realTimePlot(reset=True)
                    realTimePlot([
                        ('Failed: ' + fname[len(self.__calib_folder):], gray)
                    ], 'calibration_frames')

        if not (ret_calib):
            err_msg = 'None of the chessboard calibration frames could be used for calibration. Folder = {0}'.format(
                self.__calib_folder)
            raise Exception(err_msg)
Example #13
    def GetFrameProperties(self):
        '''
		 @brief Get frame properties such as fps, width, height

		 @return fps, width, height
		'''
        dim = GetShape(self.__frame)
        return 1.0, dim[0], dim[1]
Example #14
    def InitRecording(self, initFrame):
        '''
		 @brief Initialize recording by adapting the video to the first frame input.

		 @param initFrame
		'''
        self.__height, self.__width = GetShape(initFrame)
        self.StartRecording()
Example #15
    def Undistort(self, frame):
        '''
		 @brief Stereo undistorting

		 @return undistorted frame
		'''
        self.AssertStereoCalibrated()
        if not (self.CheckIntrinsicStereoScale(GetShape(frame))):
            self.SetIntrinsicStereoScale(GetShape(frame))
        if self.__me_master:
            und_frame = cv2.remap(frame, self.__left_rectify_maps[0],
                                  self.__left_rectify_maps[1],
                                  cv2.INTER_LANCZOS4)
        else:
            und_frame = cv2.remap(frame, self.__right_rectify_maps[0],
                                  self.__right_rectify_maps[1],
                                  cv2.INTER_LANCZOS4)
        return self.CropUndistortedFrame(und_frame)
Example #16
    def GetFrame(self, get_normal_frame_only=False):
        '''
		 @brief Pull image and structured light image

		 @param get_normal_frame_only (Only implemented to fit with the CameraLink)

		 @return frame, sl_frame
		'''
        self.__frame = GetImage(self.__folder +
                                self.__image_filenames[self.__frame_i],
                                gray=False)
        self.__sl_frame = GetImage(self.__folder +
                                   self.__sl_image_filenames[self.__frame_i],
                                   gray=False)

        if not (GetShape(self.__frame)[0] == GetShape(
                self.__sl_frame)[0]) or not (GetShape(self.__frame)[1]
                                             == GetShape(self.__sl_frame)[1]):
            raise Exception(
                'Normal image and sl image dimensions are not consistent.')

        self.__frame_i += 1
        return self.__frame, self.__sl_frame
Example #17
    def ProcessBoundaryHoughLine(self,
                                 frame,
                                 boundary_hough_line,
                                 bound_thres=10,
                                 draw=False,
                                 color=(255, 255, 0)):
        '''
		 @brief Process boundary hough line.
		 	Check if the hough line indicates the blade edge, 
		 	and compute the heading angle according to the edge.

		 @param frame (original undistorted frame)
		 @param boundary_hough_line (Tuple as (rho, theta))
		 @param bound_thres (Threshold in pixels for the line to be detected as a blade edge)
		 @param draw (default=False)
		 @param color (draw color if draw=True (default=(255,255,0)))

		 if draw:
		 	@return edge_line, rho, theta, frame (color frame with circles of hough line center points)
		 else:
			 @return edge_line, rho, theta
			 	(edge_line = True/False depending on whether the line follows a blade edge,
			 	rho = distance to edge (normalized),
			 	theta = heading angle along the detected edge (normalized))
		'''
        width, height = GetShape(frame)
        rho, theta = boundary_hough_line
        rho, theta = self.NormalizeEdgeHeading(frame, rho, theta)

        x_center = height / 2.0 + np.cos(theta) * rho
        y_center = width / 2.0 + np.sin(theta) * rho

        edge_line = False
        if (np.abs(x_center - (height - 1)) > bound_thres and
                x_center > bound_thres) and (np.abs(y_center -
                                                    (width - 1)) > bound_thres
                                             and y_center > bound_thres):
            edge_line = True

        if draw:
            cv2.circle(frame, (int(round(x_center)), int(round(y_center))), 10,
                       color, 2)
            if edge_line:
                frame = self.DrawHeading(frame, (rho, theta), color=color)

        if draw:
            return edge_line, rho, theta, frame
        return edge_line, rho, theta
Example #18
	def GetTipDetected(self, frame, selected_tip_heading, diag_percent=1/3.0):
		'''
		 @brief Check whether the tip is detected within a given percent of the diagonal distance of the frame.

		 @param frame
		 @param selected_tip_heading
		 @param diag_percent (default=1/3.0)

		 @return tip_detected, selected_tip_heading (Return: tip_detected = True/False, selected_tip_heading = (rho, theta))
		'''
		tip_detected = False
		if not(selected_tip_heading[0] == None):
			width, height = GetShape(frame)
			tip_threshold_distance = math.sqrt(width*width + height*height)*diag_percent
			if selected_tip_heading[0] <= tip_threshold_distance:
				tip_detected = True
		return tip_detected, selected_tip_heading
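For intuition, the threshold scales with the frame diagonal; a quick numeric check (hypothetical 480x640 frame, diag_percent=1/3.0):

import math

width, height = 480, 640
# sqrt(480^2 + 640^2) = 800 px diagonal, so the tip must lie within ~267 px of the image center.
print(math.sqrt(width * width + height * height) * (1 / 3.0))  # -> 266.66...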
Example #19
    def CalibrateStandardScaling(self, printInfo=False):
        '''
		 @brief Run standard scale calibration.

		 @param printInfo (Print info during calibration (default=False))
		'''
        self.SetScalingImages()

        standard_scale_distances = []
        mean_point_sizes = []
        for sl_fname, fname in self.__calib_img_fnames:
            frame = GetImage(fname, gray=False)
            sl_frame = GetImage(sl_fname, gray=False)
            try:
                delta_frame, keypoints, descriptors, frame, sl_frame = self.GetPointList(
                    frame,
                    sl_frame,
                    concatenate_points=True,
                    compute_descriptors=False)
            except DroneVisionError, err:
                warnings.simplefilter('always')
                warnings.warn(
                    str(err) +
                    ' - file: {0}, SL file: {1}'.format(fname, sl_fname),
                    Warning)
                warnings.simplefilter('default')
                continue
            width, height = GetShape(sl_frame)
            blockSize = int(
                math.sqrt(math.pow(width, 2.0) + math.pow(height, 2.0)) // 2)
            scaling_point_frame, mean_point_size = self.PointScaleMatch(
                delta_frame, keypoints, blockSize)
            standard_scale_distance = self.ComputeStandardScaleDistance(
                scaling_point_frame)
            if np.isnan(standard_scale_distance):
                err = 'No scaling points detected - file: {0}, SL file: {1}'.format(
                    fname, sl_fname)
                warnings.simplefilter('always')
                warnings.warn(str(err), Warning)
                warnings.simplefilter('default')
                continue

            standard_scale_distances.append(standard_scale_distance)
            mean_point_sizes.append(mean_point_size)
Example #20
    def GetHoughLineSegment(self, frame, rho, theta):
        '''
		 @brief Get start and end position of hough line.
		 	The hough line is normally positioned within the frame,
			but theta > pi/2 will give positions outside of the frame.
			The algorithm uses sin/cos/tan to compute the correct start and end position
			of the hough line inside the frame.
		 
		 @param frame
		 @param rho
		 @param theta

		 @return x_start, y_start, x_end, y_end
		'''
        width, height = GetShape(frame)
        size = math.sqrt(math.pow(width, 2.0) + math.pow(height, 2.0))
        x_start = np.cos(theta) * rho - size * (-np.sin(theta))
        y_start = np.sin(theta) * rho - size * np.cos(theta)
        x_end = np.cos(theta) * rho + size * (-np.sin(theta))
        y_end = np.sin(theta) * rho + size * np.cos(theta)
        return x_start, y_start, x_end, y_end
Example #21
    def DrawHeading(self,
                    frame,
                    heading,
                    color=(255, 255, 0),
                    line_thick=2,
                    draw_arrow=True):
        '''
		 @brief Draw heading on frame.
		 	Heading is measured from image center.

		 @param frame 
		 @param heading (rho, theta)
		 @param color (default=(255,255,0) (YELLOW))
		 @param line_thick (default=2)
		 @param draw_arrow (draw line as arrow (default=True))

		 @return frame (color)
		'''
        rho, theta = heading[:2]
        if not (rho == None or theta == None):
            frame = CheckColor(frame)
            I_l, I_w = GetShape(frame)
            x0 = I_w / 2.0
            y0 = I_l / 2.0
            x1 = int(round(x0))
            y1 = int(round(y0))
            x2 = int(round(x0 + np.cos(theta) * rho))
            y2 = int(round(y0 + np.sin(theta) * rho))
            if draw_arrow:
                cv2.arrowedLine(frame, (x1, y1), (x2, y2),
                                color,
                                line_thick,
                                tipLength=0.1)
            else:
                cv2.line(frame, (x1, y1), (x2, y2), color, line_thick)
        return frame
Example #22
    def TestBlobScaleDetector(self, folder, fn_frame, fn_slframe,
                              actual_distance):
        '''
		 @brief Test function for blob scale detector unit.

		 @param folder Input folder
		 @param fn_frame Frame filename without points.
		 @param fn_slframe Frame filename with points.
		 @param actual_distance (mm)
		'''
        import timeit, math
        import numpy as np
        from src.DroneVision.DroneVision_src.hardware.imageTools import GetImage, MatplotShow
        from src.DroneVision.DroneVision_src.imgProcessing.frameTools.frameTools import GetShape

        print '\n'
        print '#----------- TESTING SCALING BASED BLOB DETECTION   \t---------------#'
        print '#----------- Image without points: {0} \t---------------#'.format(
            fn_frame)
        print '#----------- Image with points: {0}    \t---------------#'.format(
            fn_slframe)

        settings_inst = self.Settings.Settings()
        settings_inst.ChangeSetting('BASIC', 'reset_calibration', False)
        pointDet = self.PointDetection.PointDetection(
            True, settings_inst.GetSettings())

        pointDet.CalibrateBlobScaleDetector(printInfo=True,
                                            force_calibration=True)
        print 'Min distance between blobs: {0}'.format(
            pointDet.GetMinDistanceBetweenBlobs())

        fn_frame = folder + fn_frame
        fn_slframe = folder + fn_slframe

        delay = timeit.default_timer()
        frame = GetImage(fn_frame)
        sl_frame = GetImage(fn_slframe)
        print 'Delay reading images: {0} sec'.format(timeit.default_timer() -
                                                     delay)

        total_delay = timeit.default_timer()

        delta_frame, points_kp, blob_desc, frame, sl_frame = pointDet.GetPointList(
            frame, sl_frame, concatenate_points=True, draw=True)

        delay = timeit.default_timer()
        width, height = GetShape(sl_frame)
        blockSize = int(
            math.sqrt(math.pow(width, 2.0) + math.pow(height, 2.0)) // 4)
        scaling_point_frame, mean_point_size = pointDet.PointScaleMatch(
            delta_frame, points_kp, blockSize)
        print 'Delay for computing disparity points: {0} sec'.format(
            timeit.default_timer() - delay)

        timeout = timeit.default_timer() - total_delay
        print 'Total delay for downscaling + undistort + blob + feature based stereopsis: {0} sec'.format(
            timeout)

        touple_frames = []
        touple_frames.append(('SL frame', sl_frame))
        touple_frames.append(('Delta frame', delta_frame))
        if not (self.CheckAllTests()):
            MatplotShow(touple_frames,
                        fn_frame + '_Blob_scale_test',
                        save_fig=self.save_figs,
                        save_fig_only=self.save_figs_only)
Example #23
	def FindHeading(self, frame, selected_blade_edge_headings, rho_step_distance=None, default_rho_step_distance_diag_perc=1/4.0, rho_min_diag_perc=1/4.0, flip_heading=False):
		'''
		 @brief Find the next heading based on selected edge heading and current heading.
		 	Computed using equations (6.14 to 6.17) in MSc final thesis - Hans Erik Heggem
		 	Reset the heading by using ResetHeading()

		 @param frame
		 @param selected_blade_edge_headings
		 @param rho_step_distance (step distance in pixels for the drone to fly during each iteration. If None, then each step distance is set to a quarter of the diagonal frame size (default=None))
		 @param default_rho_step_distance_diag_perc (Default step distance if rho_step_distance==None. Given as a percent of the diagonal frame size (default=0.25))
		 @param rho_min_diag_perc (Default percent of the diagonal length from center to image edge, used to calculate rho_min to navigate along a single edge. Given as a percent between 0-1 (default=0.25))
		 @param flip_heading (Flip heading in opposite direction (True/False))

		 @return selected_heading (rho = distance to next heading position (in image frame), theta = heading angle in radians)
		'''
		selected_heading = (0.0,0.0)
		if len(selected_blade_edge_headings) > 0:
			I_l, I_w 	= GetShape(frame)
			diag_size 	= np.sqrt(I_l*I_l + I_w*I_w)
			if rho_step_distance == None:
				rho_step_distance = diag_size*default_rho_step_distance_diag_perc

			if len(selected_blade_edge_headings) > 1:
				if self.VerifyEdgeQuadrant(selected_blade_edge_headings[0]) and self.VerifyEdgeQuadrant(selected_blade_edge_headings[1]):
					omega = (selected_blade_edge_headings[0][1] + selected_blade_edge_headings[1][1])/2.0 + np.pi
					if not(self.GetMovingTowardsTipOrRoot()): # True for moving towards tip.
						omega -= np.pi
					if flip_heading:
						omega += np.pi
				else: # Either one of the edges are in the incorrect quadrant - estimate heading based on closest edge
					if selected_blade_edge_headings[0][0] < selected_blade_edge_headings[1][0]:
						selected_blade_edge_headings = selected_blade_edge_headings[0:1]
					else:
						selected_blade_edge_headings = selected_blade_edge_headings[1:2]
					return self.FindHeading(frame, selected_blade_edge_headings, rho_step_distance, default_rho_step_distance_diag_perc, rho_min_diag_perc, flip_heading)
			else:
				sign 		= 1.0
				flip_rot 	= 0.0
				if not(self.VerifyEdgeQuadrant(selected_blade_edge_headings[0])):
					sign  		= -1.0 	# Everything must be flipped if the edge is in the wrong quadrant
					flip_rot 	= np.pi # Edge is in the wrong quadrant, so find rho_f in the correct direction.

				edge_rho, edge_theta, max_line, hor_or_vert = selected_blade_edge_headings[0]
				I_cos 		= I_w*np.cos(edge_theta + flip_rot)
				I_sin 		= I_l*np.sin(edge_theta + flip_rot)
				diag_rot_l 	= 0.5*np.sqrt(I_cos*I_cos + I_sin*I_sin)
				rho_f 		= diag_rot_l - sign*edge_rho
				rho_min 	= diag_rot_l*rho_min_diag_perc
				
				if flip_rot != 0.0: # Flip situations if the edge is in wrong quadrant
					rho_f_cp 	= rho_f
					rho_f 		= rho_min
					rho_min 	= rho_f_cp

				if rho_min <= rho_f:
					W_f 	= 2 - rho_min/rho_f
				else:
					W_f 	= rho_f/rho_min

				if max_line: # Mirrored direction depending on the side of the blade to follow.
					sign *= -1.0

				if not(self.GetMovingTowardsTipOrRoot()): # True for moving towards tip.
					sign *= -1.0
				if flip_heading:
					sign *= -1.0

				omega = edge_theta + sign*(np.pi/2)*W_f

			selected_heading = (rho_step_distance, omega)
		return selected_heading # Default (0.0, 0.0) when no edge headings are given.
Example #24
    def PointScaleMatch(self,
                        frame,
                        keypoints,
                        blockSize,
                        filtrate_scaling_points=None):
        '''
		 @brief Compute scaling map between points.

		 @param frame
		 @param keypoints
		 @param blockSize (Should be about a quarter of the frame size if the points are distributed evenly on the frame)
		 @param filtrate_scaling_points (If None, then it is set by the class instance settings (default=None))

		 @return scaling_point_frame, mean_point_size
		'''
        # Class instance settings
        if filtrate_scaling_points == None:
            filtrate_scaling_points = self.__filtrate_points

        height, width = GetShape(frame)
        points_frame = np.zeros((height, width), dtype=np.uint16)
        scaling_point_frame = np.zeros((height, width))
        float_points_list = [None] * (width * height)
        mean_point_size = 0.0
        for point in keypoints:
            x = int(round(point.pt[1]))
            y = int(round(point.pt[0]))
            float_points_list[x * width + y] = point
            mean_point_size += point.size
            points_frame[x, y] = point.size
        mean_point_size /= len(keypoints)
        blockSize_radi = blockSize // 2
        points_pos = np.argwhere(points_frame > 0)
        for i in range(len(points_pos)):
            x = points_pos[i, 0]
            y = points_pos[i, 1]
            point = float_points_list[x * width + y]
            if point == None:
                raise Exception('None point: {0}'.format(point))
            x_offset_neg = 0
            x_offset_pos = 0
            y_offset_neg = 0
            y_offset_pos = 0
            if x - blockSize_radi < 0:
                x_offset_neg = x - blockSize_radi
            elif x + blockSize_radi >= height:
                x_offset_pos = x + blockSize_radi - (height - 1)
            if y - blockSize_radi < 0:
                y_offset_neg = y - blockSize_radi
            elif y + blockSize_radi >= width:
                y_offset_pos = y + blockSize_radi - (width - 1)
            x_block_start = x - blockSize_radi - x_offset_neg
            y_block_start = y - blockSize_radi - y_offset_neg
            block = points_frame[x_block_start:(x + 1) + blockSize_radi -
                                 x_offset_pos, y_block_start:(y + 1) +
                                 blockSize_radi - y_offset_pos]
            x_block_center = len(block) // 2
            y_block_center = len(block[x_block_center]) // 2
            block[x_block_center, y_block_center] = 0
            block_disparities = np.argwhere(block > 0)
            shortest_distance = -1
            for j in range(len(block_disparities)):
                x_block = x_block_start + block_disparities[j, 0]
                y_block = y_block_start + block_disparities[j, 1]
                block_point = float_points_list[x_block * width + y_block]
                if block_point == None:
                    raise Exception(
                        'None block point: {0}'.format(block_point))
                distance = math.sqrt(
                    math.pow(point.pt[0] - block_point.pt[0], 2) +
                    math.pow(point.pt[1] - block_point.pt[1], 2))
                if distance < shortest_distance or shortest_distance < 0:
                    shortest_distance = distance
            if shortest_distance >= 0:
                scaling_point_frame[x, y] = shortest_distance

        if filtrate_scaling_points:
            scaling_point_frame = self.FiltrateScalingPoints(
                scaling_point_frame)

        return scaling_point_frame, mean_point_size
Example #25
def FindLineLimits(frame,
                   hough_lines,
                   keypoints,
                   radi_threshold=None,
                   radi_threshold_tuning_param=2.0,
                   draw_hough_matrix=False,
                   draw_bounded_lines=False,
                   draw_max_min_lines=False,
                   draw_arrowed_bounded_lines=True):
    '''
	 @brief Find boundaries for all hough lines according to the point map.
	 	Segments an object based on the point list by limiting the hough lines and detecting the boundary lines.

	 @param frame (grayscale)
	 @param hough_lines List of hough lines (rho, theta).
	 @param keypoints List of detected point positions.
	 @param radi_threshold Threshold in pixels to search for in vertical and horizontal axis (If None, then it is set to the biggest blob size)
	 @param radi_threshold_tuning_param Threshold tuning parameter (default=2 when using biggest blob size. < 0.5 is recommended when using distance between blobs).
	 @param draw_hough_matrix Draw hough lines matrix.
	 @param draw_bounded_lines Draw bounded lines.
	 @param draw_max_min_lines Draw detected max min lines.
	 @param draw_arrowed_bounded_lines

	 @return frame, bounded_lines, max_hor_line, min_hor_line, max_vert_line, min_vert_line 
	 	frame - Usually grayscale, but rgb frame if any draw flag == True
	 	bounded_lines - list with vertical and horizontal lines. 
	 					bounded_lines[0] = horizontal lines list, bounded_lines[1] = vertical lines list, 
	 					where each index describing a line as ((x1,y1), (x2,y2))
	 	max_min_lines - List of max and min horizontal and vertical lines, in this order: 
		 	max_hor_line - Bottom boundary line on the horizontal axis (Note: bottom = max index in images) as ((x1,y1), (x2,y2))
		 	min_hor_line - Top boundary line on the horizontal axis (Note: top = min index in images) as ((x1,y1), (x2,y2))
		 	max_vert_line - Right most boundary line on the vertical axis as ((x1,y1), (x2,y2))
		 	min_vert_line - Left most boundary line on the vertical axis as ((x1,y1), (x2,y2))
	'''

    x_points = np.zeros(len(keypoints))  #Width
    y_points = np.zeros(len(keypoints))  #height
    biggest_size = 0.0
    for i in range(len(keypoints)):
        if keypoints[i].size > biggest_size:
            biggest_size = keypoints[i].size
        x_points[i] = keypoints[i].pt[0]
        y_points[i] = keypoints[i].pt[1]

    # Tuning variable - search along vertical or horizontal axis is limited to this radius.
    if radi_threshold == None:
        radi_threshold = biggest_size
    radi_threshold *= radi_threshold_tuning_param

    min_vert_points = [[], []]
    max_vert_points = [[], []]
    min_hor_points = [[], []]
    max_hor_points = [[], []]
    bounded_lines = [[], []]
    width, height = GetShape(frame)

    size = math.sqrt(math.pow(width, 2.0) + math.pow(height, 2.0))
    max_vert_line_y_pos = -size * 10  # Set invalid 'large' values to begin with
    min_vert_line_y_pos = size * 10
    max_hor_line_x_pos = -size * 10
    min_hor_line_x_pos = size * 10

    if draw_hough_matrix:
        frame = DrawHoughLines(frame, hough_lines)

    for rho, theta in hough_lines:
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        if round(np.rad2deg(a)) == 0:  # Horizontal lines - search y_points
            y_m_above = np.argwhere(y_points >= y0 - radi_threshold).T[0]
            y_m_below = np.argwhere(y_points < y0 + radi_threshold).T[0]
            y_m_ab_b = np.in1d(y_m_above, y_m_below)
            y_m_be_b = np.in1d(y_m_below, y_m_above)
            y_m_ab_in = np.argwhere(y_m_ab_b == True).T[0]
            y_m_be_in = np.argwhere(y_m_be_b == True).T[0]
            y_m_temp = np.zeros(len(y_m_ab_in) + len(y_m_be_in))
            len_ab = len(y_m_ab_in)
            for i in range(len(y_m_temp)):
                if i < len_ab:
                    y_m_temp[i] = y_m_above[y_m_ab_in[i]]
                else:
                    y_m_temp[i] = y_m_below[y_m_be_in[i - len_ab]]
            y_m = np.unique(y_m_temp)
            if len(y_m) < 2:  # Ignore lines of only a single point.
                continue
            y_dist = {}
            for y in y_m:
                y = int(y)
                y_dist[x_points[y]] = y_points[y]
            sorted_y = sorted(y_dist.items(), key=operator.itemgetter(0))
            min_point = sorted_y[0]
            max_point = sorted_y[-1:][0]

            xp = np.zeros(len(y_dist))
            fp = np.zeros(len(y_dist))
            i = 0
            for x, y in sorted_y:
                xp[i] = x
                fp[i] = y
                i += 1
            y_inter = np.interp([min_point[0], max_point[0]], xp, fp)
            x1 = int(round(min_point[0]))
            y1 = int(round(y_inter[0]))
            x2 = int(round(max_point[0]))
            y2 = int(round(y_inter[1]))

            min_hor_points[0].append(min_point[0])  #x
            min_hor_points[1].append(y_inter[0])  #y
            max_hor_points[0].append(max_point[0])  #x
            max_hor_points[1].append(y_inter[1])  #y
            average_line_x_pos = y_inter[0] + (y_inter[1] - y_inter[0]) / 2
            if average_line_x_pos > max_hor_line_x_pos:
                max_hor_line_x_pos = average_line_x_pos
                max_hor_line = ((x1, y1), (x2, y2))
            if average_line_x_pos < min_hor_line_x_pos:
                min_hor_line_x_pos = average_line_x_pos
                min_hor_line = ((x1, y1), (x2, y2))
            bounded_lines[0].append(((x1, y1), (x2, y2)))
        elif round(np.rad2deg(b)) == 0:  # vertical lines - search x_points
            x_m_above = np.argwhere(x_points >= x0 - radi_threshold).T[0]
            x_m_below = np.argwhere(x_points < x0 + radi_threshold).T[0]
            x_m_ab_b = np.in1d(x_m_above, x_m_below)
            x_m_be_b = np.in1d(x_m_below, x_m_above)
            x_m_ab_in = np.argwhere(x_m_ab_b == True).T[0]
            x_m_be_in = np.argwhere(x_m_be_b == True).T[0]
            x_m_temp = np.zeros(len(x_m_ab_in) + len(x_m_be_in))
            len_ab = len(x_m_ab_in)
            for i in range(len(x_m_temp)):
                if i < len_ab:
                    x_m_temp[i] = x_m_above[x_m_ab_in[i]]
                else:
                    x_m_temp[i] = x_m_below[x_m_be_in[i - len_ab]]
            x_m = np.unique(x_m_temp)
            if len(x_m) < 2:  # Ignore lines of only a single point.
                continue
            x_dist = {}
            for x in x_m:
                x = int(x)
                x_dist[y_points[x]] = x_points[x]
            sorted_x = sorted(x_dist.items(), key=operator.itemgetter(0))
            min_point = sorted_x[0]
            max_point = sorted_x[-1:][0]

            xp = np.zeros(len(x_dist))
            fp = np.zeros(len(x_dist))
            i = 0
            for x, y in sorted_x:
                xp[i] = x
                fp[i] = y
                i += 1
            x_inter = np.interp([min_point[1], max_point[1]], xp, fp)
            x1 = int(round(x_inter[0]))
            y1 = int(round(min_point[0]))
            x2 = int(round(x_inter[1]))
            y2 = int(round(max_point[0]))

            min_vert_points[0].append(x_inter[0])  #x
            min_vert_points[1].append(min_point[0])  #y
            max_vert_points[0].append(x_inter[1])  #x
            max_vert_points[1].append(max_point[0])  #y
            average_line_y_pos = x_inter[0] + (x_inter[1] - x_inter[0]) / 2
            if average_line_y_pos > max_vert_line_y_pos:
                max_vert_line_y_pos = average_line_y_pos
                max_vert_line = ((x1, y1), (x2, y2))
            if average_line_y_pos < min_vert_line_y_pos:
                min_vert_line_y_pos = average_line_y_pos
                min_vert_line = ((x1, y1), (x2, y2))
            bounded_lines[1].append(((x1, y1), (x2, y2)))
        else:
            raise DroneVisionError('find_line_limits_unexpected_angle')

        if draw_bounded_lines:
            color = (0, 255, 255)
            line_thick = 2
            if draw_arrowed_bounded_lines:
                tipLength = 0.05
                cv2.arrowedLine(frame, (x1, y1), (x2, y2),
                                color,
                                line_thick,
                                tipLength=tipLength)
                cv2.arrowedLine(frame, (x2, y2), (x1, y1),
                                color,
                                line_thick,
                                tipLength=tipLength)
            else:
                cv2.line(frame, (x1, y1), (x2, y2), color, line_thick)

    try:
        max_min_lines = [
            max_hor_line, min_hor_line, max_vert_line, min_vert_line
        ]
    except NameError:  # No horizontal or vertical boundary line was ever assigned.
        raise DroneVisionError('find_line_limits_no_hor_or_vert_found')

    if draw_max_min_lines and len(hough_lines) > 0:
        cv2.line(frame, max_hor_line[0], max_hor_line[1], (255, 0, 255), 4)
        cv2.line(frame, min_hor_line[0], min_hor_line[1], (255, 0, 255), 4)
        cv2.line(frame, max_vert_line[0], max_vert_line[1], (255, 0, 255), 4)
        cv2.line(frame, min_vert_line[0], min_vert_line[1], (255, 0, 255), 4)

    return frame, bounded_lines, max_min_lines
Example #26
def HoughLineEdgePoints(frame,
                        edge_points,
                        horizontal_points,
                        hough_peak_param=1.2,
                        dilation_kernel_size=3,
                        dilation_iterations=1):
    '''
	 @brief Speeded-up Hough line transform iterating over known key point positions.
	 	Inspired by: https://alyssaq.github.io/2014/understanding-hough-transform/

	 @param frame
	 @param edge_points List of detected edge points as [[x], [y]]
	 @param horizontal_points Set True for detecting horizontal lines, and False for detecting vertical lines.
	 		The lines will be focused around the vertical or horizontal axis.
	 @param hough_peak_param Delimiter for increasing number of peaks to validate for finding the most significant peaks. Must be >= 1.0.
	 @param dilation_kernel_size (Increase strong areas of peak points by dilation.
	 	Set the kernel size for dilation as dilation_kernel_size*dilation_kernel_size (e.g. a 3*3 kernel).
	 	Set to 1 to give the dilation no effect. (default=3))
	 @param dilation_iterations (Number of dilation iterations to increase the width of strong areas (default=1))

	 @return (rho, theta) (Distance and angle of the most significant edge.)
	'''

    # Rho and Theta ranges
    if horizontal_points:
        start_degree = 0.0
        end_degree = 45.0
        step_degree = 1.0
        thetas_small = np.deg2rad(
            np.arange(start_degree, end_degree, step_degree))
        start_degree = 135.0
        end_degree = 180.0
        step_degree = 1.0
        thetas_big = np.deg2rad(
            np.arange(start_degree, end_degree, step_degree))
        thetas = np.concatenate((thetas_small, thetas_big))
    else:  # vertical points
        start_degree = 45.0
        end_degree = 135.0
        step_degree = 1.0
        thetas = np.deg2rad(np.arange(start_degree, end_degree, step_degree))

    width, height = GetShape(frame)
    diag_len = np.ceil(np.sqrt(width * width + height * height))  # max_dist
    rhos = np.linspace(-np.int(diag_len), np.int(diag_len),
                       np.int(diag_len) * 2)

    # Cache some reusable values
    cos_t = np.cos(thetas)
    sin_t = np.sin(thetas)
    num_thetas = len(thetas)

    # Hough accumulator array of theta vs rho
    accumulator = np.zeros((int(2 * diag_len), num_thetas), dtype=np.float32)
    acc_shape = GetShape(accumulator)

    # Vote in the hough accumulator
    for i in range(len(edge_points[0])):
        x = edge_points[0][i]
        y = edge_points[1][i]

        for t_idx in range(num_thetas):
            # Calculate rho. diag_len is added for a positive index
            rho = int(round(x * cos_t[t_idx] + y * sin_t[t_idx]) + diag_len)
            accumulator[rho, t_idx] += 1

    # Dilate the accumulator so that close neighboring voting points are stronger, and add the Gaussian smoothed accumulator to highlight the strongest peak in the strongest areas.
    accumulator = cv2.dilate(accumulator,
                             np.ones(
                                 (dilation_kernel_size, dilation_kernel_size),
                                 dtype=np.uint8),
                             iterations=dilation_iterations)
    accumulator += cv2.GaussianBlur(
        accumulator, (dilation_kernel_size, dilation_kernel_size), 0
    )  # Let sigma be calculated according to the kernel size. See cv2 doc.

    # Peak finding based on max votes.
    id_m = np.argwhere(accumulator >= np.max(accumulator) / hough_peak_param)
    len_id_m = len(id_m)
    rhos_res = [0] * len_id_m
    thetas_res = [0] * len_id_m
    for i in range(len_id_m):
        idx = id_m[i][0] * acc_shape[1] + id_m[i][1]
        rhos_res[i] = rhos[idx // acc_shape[1]]
        thetas_res[i] = thetas[idx % acc_shape[1]]

    rho = np.median(rhos_res)
    theta = np.median(thetas_res)

    return (rho, theta)
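A hedged usage sketch (assumptions: np, cv2 and GetShape are available, and a NumPy old enough that np.int, as used above, is still valid):

import numpy as np

frame = np.zeros((100, 100), dtype=np.uint8)
ys = list(range(10, 90))
xs = [30] * len(ys)  # A column of points at x=30 votes strongest near theta=0, rho=30.
rho, theta = HoughLineEdgePoints(frame, [xs, ys], horizontal_points=True)
print('rho={0:.1f}, theta={1:.2f} rad'.format(rho, theta))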
Example #27
def HoughLinesPointMatrix(frame,
                          keypoints,
                          min_lines=2,
                          radi_threshold=None,
                          radi_threshold_tuning_param=2.0):
    '''
	 @brief Speeded-up Hough line transform iterating over known key point positions.
	 	Inspired by: https://alyssaq.github.io/2014/understanding-hough-transform/

	 @param frame
	 @param keypoints List of detected points (using the blob detection algorithm.)
	 @param min_lines Minimum lines to finally end up with. Set to -1 to prevent any concatenation of detected lines.
	 @param radi_threshold Threshold in pixels to search for in vertical and horizontal axis (If None, then it is set to the biggest blob size)
	 @param radi_threshold_tuning_param Threshold tuning parameter (default=2 when using biggest blob size. < 0.5 is recommended when using distance between blobs).

	 @return hough_lines Returned as a list of tuples (rho, theta)
	'''
    # Rho and Theta ranges
    thetas = np.deg2rad(np.array([0.0, 90.0]))
    width, height = GetShape(frame)
    diag_len = np.ceil(np.sqrt(width * width + height * height))  # max_dist
    rhos = np.linspace(-np.int(diag_len), np.int(diag_len),
                       np.int(diag_len) * 2)

    # Cache some reusable values
    cos_t = np.cos(thetas)
    sin_t = np.sin(thetas)
    num_thetas = len(thetas)

    # Hough accumulator array of theta vs rho
    accumulator = np.zeros((int(2 * diag_len), num_thetas), dtype=np.float32)
    acc_shape = GetShape(accumulator)

    biggest_point = 0
    # Vote in the hough accumulator
    for i in range(len(keypoints)):
        x = keypoints[i].pt[0]
        y = keypoints[i].pt[1]

        if keypoints[i].size > biggest_point:
            biggest_point = keypoints[i].size

        for t_idx in range(num_thetas):
            # Calculate rho. diag_len is added for a positive index
            rho = int(round(x * cos_t[t_idx] + y * sin_t[t_idx]) + diag_len)
            accumulator[rho, t_idx] += 1

    # Peak finding based on max votes
    id_m = np.argwhere(accumulator >= 1)
    len_id_m = len(id_m)
    hough_lines = [0] * len_id_m
    #PrintHoughAccumulator(accumulator, id_m, rhos)
    for i in range(len_id_m):
        idx = id_m[i][0] * acc_shape[1] + id_m[i][1]
        rho = rhos[idx // acc_shape[1]]
        theta = thetas[idx % acc_shape[1]]
        hough_lines[i] = (rho, theta)

    if radi_threshold != None:
        origin_threshold = radi_threshold * radi_threshold_tuning_param
    else:
        origin_threshold = biggest_point
    threshold = origin_threshold

    # Stop concatenating when there is no change in n_lines.
    n_lines = len(hough_lines) + 1
    while len(hough_lines) > min_lines and n_lines - len(hough_lines) > 0 and min_lines > 0:
        n_lines = len(hough_lines)
        hough_lines = concatenateLines(hough_lines, threshold)
        threshold += origin_threshold / 2
    return hough_lines  # Returned as tuples of (rho, theta).
Example #28
    def TestPointMatchStereoVision(self,
                                   folder,
                                   left_fn_frame,
                                   left_fn_slframe,
                                   right_fn_frame,
                                   right_fn_slframe,
                                   baseline,
                                   actual_distance,
                                   filtrate_3Dpoints=True,
                                   use_triangulation=True,
                                   use_opencv_triangulation=True,
                                   use_block_matching=True,
                                   use_brute_force_matching=False):
        '''
		 @brief Test function for stereo detection unit using point matching.

		 @param folder Input folder 
		 @param left_fn_frame Frame filename without points.
		 @param left_fn_slframe Frame filename with points.
		 @param right_fn_frame Frame filename without points.
		 @param right_fn_slframe Frame filename with points.
		 @param baseline (mm)
		 @param actual_distance (mm)
		 @param filtrate_3Dpoints (Filtrate 3D points which are outside of the standard deviation)
		 @param use_triangulation (True for using tringulation method, False for using easy disparity to depth method)
		 @param use_opencv_triangulation (True for using the opencv implementation of triangulation)
		 @param use_block_matching (use the block matching feature matching method)
		 @param use_brute_force_matching (use the brute force matching method (if use_block_matching=False))
		'''
        import timeit
        import numpy as np
        from src.DroneVision.DroneVision_src.imgProcessing.frameTools.frameTools import GetShape
        from src.DroneVision.DroneVision_src.hardware.imageTools import GetImage, MatplotShow

        if use_block_matching:
            title = 'BLOCK SEARCH'
        else:
            if use_brute_force_matching:
                title = 'BRUTE F.'
            else:
                title = 'FLANN'
        if use_triangulation:
            extra_info = 'TRIANGULATION TO DEPTH'
        else:
            extra_info = 'DISPARITY TO DEPTH'
        print '\n'
        print '#----------- TESTING FEATURE BASED STEREO DETECTION - {0} MATCHING - {1}\t---------------#'.format(
            title, extra_info)
        print '#----------- Left Image without points: {0} \t\t\t\t\t---------------#'.format(
            left_fn_frame)
        print '#----------- Right Image without points: {0} \t\t\t\t---------------#'.format(
            right_fn_frame)

        settings_inst = self.Settings.Settings()
        settings_inst.ChangeSetting('BASIC', 'reset_calibration', False)
        #settings_inst.ChangeSetting('CALIB', 'baseline', baseline)
        settings_inst.ChangeSetting('F_STEREO', 'use_triangulation', use_triangulation)
        settings_inst.ChangeSetting('F_STEREO', 'use_cv2_triangulation', use_opencv_triangulation)
        settings_inst.ChangeSetting('F_STEREO', 'use_block_matching', use_block_matching)
        settings_inst.ChangeSetting('F_STEREO', 'use_brute_force', use_brute_force_matching)
        left_pointDet = self.PointDetection.PointDetection(True, settings_inst.GetSettings())
        left_pointDet.CalibratePointDetection(printInfo=True)
        right_pointDet = self.PointDetection.PointDetection(False, settings_inst.GetSettings())
        right_pointDet.CalibratePointDetection(printInfo=False)

        #if not(left_pointDet.GetBaseline() == baseline) or not(right_pointDet.GetBaseline() == baseline):
        #	raise Exception('Fail baseline!')

        left_fn_frame = folder + left_fn_frame
        left_fn_slframe = folder + left_fn_slframe
        right_fn_frame = folder + right_fn_frame
        right_fn_slframe = folder + right_fn_slframe

        delay = timeit.default_timer()
        left_frame = GetImage(left_fn_frame)
        left_sl_frame = GetImage(left_fn_slframe)
        right_frame = GetImage(right_fn_frame)
        right_sl_frame = GetImage(right_fn_slframe)
        print 'Delay reading images: {0} sec'.format(timeit.default_timer() - delay)

        #MatplotShow([('left', left_sl_frame), ('right', right_sl_frame)], 'Feature Stereopsis', save_fig=self.save_figs, save_fig_only=self.save_figs_only)

        left_delta_frame, left_points_kp, left_blob_desc, left_frame, left_sl_frame = left_pointDet.GetPointList(
            left_frame, left_sl_frame, compute_descriptors=not use_block_matching, draw=True)
        right_delta_frame, right_points_kp, right_blob_desc, right_frame, right_sl_frame = right_pointDet.GetPointList(
            right_frame, right_sl_frame, compute_descriptors=not use_block_matching, draw=True)

        total_delay = timeit.default_timer()

        points3D, match_points_frame = left_pointDet.Get3DPoints(
            GetShape(left_delta_frame), GetShape(right_delta_frame),
            left_points_kp, left_blob_desc, right_points_kp, right_blob_desc,
            filtrate_3Dpoints=filtrate_3Dpoints, draw=True,
            left_delta_frame=left_delta_frame, right_delta_frame=right_delta_frame)
        points3D_m = left_pointDet.Points3DToMatrix(points3D)
        average_point3D = np.mean(points3D_m, axis=1)
        std_point3D = np.std(points3D_m, axis=1)

        timeout = timeit.default_timer() - total_delay
        print 'Total delay for feature based stereopsis: {0} sec'.format(timeout)

        delay = timeit.default_timer()
        points3D = left_pointDet.Get3DPoints(
            GetShape(left_delta_frame), GetShape(right_delta_frame),
            left_points_kp, left_blob_desc, right_points_kp, right_blob_desc,
            filtrate_3Dpoints=filtrate_3Dpoints, draw=False,
            left_delta_frame=left_delta_frame, right_delta_frame=right_delta_frame)
        timeout_stereopsis = timeit.default_timer() - delay

        for point3D in points3D:
            print 'Point3D: x = {0} \t y = {1} \t z = {2}'.format(point3D[0, 0], point3D[1, 0], point3D[2, 0])
        print 'Average Point3D: x = {0} \t y = {1} \t z = {2}'.format(average_point3D[0, 0], average_point3D[1, 0], average_point3D[2, 0])
        print 'STD Point3D: x = {0} \t y = {1} \t z = {2}'.format(std_point3D[0, 0], std_point3D[1, 0], std_point3D[2, 0])
        print 'Delay for computing distance points: {0} sec, average distance: {1} mm, actual distance: {2} mm, distance error: {3} mm, baseline = {4} mm'.format(
            timeout_stereopsis, average_point3D[2, 0], actual_distance, average_point3D[2, 0] - actual_distance, baseline)

        #MatplotShow([(title, match_points_frame)], save_fig=self.save_figs, save_fig_only=self.save_figs_only)

        #title += ':Z(mean)={0:.2f}mm'.format(average_point3D[2,0])

        if not self.CheckAllTests():
            if self.show_delta_frames:
                MatplotShow([('left', left_delta_frame), ('right', right_delta_frame)],
                            left_fn_frame + '_Feature_stereo_test_delta_frames',
                            save_fig=self.save_figs, save_fig_only=self.save_figs_only)

        return (title, match_points_frame)
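When use_triangulation is False, the test falls back to the simple disparity-to-depth relation Z = f*b/d for a rectified stereo pair; when it is True together with use_opencv_triangulation, depth comes from cv2.triangulatePoints. Below is a minimal sketch of both routes under an idealized rectified geometry; the focal length, baseline, and pixel coordinates are illustrative assumptions, not values from the project's calibration:

import numpy as np
import cv2

f = 700.0   # Focal length in pixels (assumed).
b = 50.0    # Baseline in mm (assumed).
d = 14.0    # Horizontal disparity in pixels (assumed).

# Route 1 - disparity to depth: Z = f * b / d.
Z = f * b / d
print('Depth from disparity: {0} mm'.format(Z))  # 2500.0 mm

# Route 2 - triangulation with cv2.triangulatePoints. P1 and P2 are the
# 3x4 projection matrices of an ideal pair separated by the baseline
# along x (principal point at the origin for simplicity).
K = np.eye(3) * [f, f, 1.0]
P1 = np.hstack((K, np.zeros((3, 1))))
P2 = P1.copy()
P2[0, 3] = -f * b  # Right camera shifted by the baseline.
left_pt = np.array([[320.0], [240.0]])
right_pt = np.array([[320.0 - d], [240.0]])
X_h = cv2.triangulatePoints(P1, P2, left_pt, right_pt)
X = X_h[:3] / X_h[3]  # Dehomogenize; X[2, 0] is the depth in mm.
print('Depth from triangulation: {0} mm'.format(X[2, 0]))

Both routes agree for an ideal pair; in practice the triangulation route tolerates vertical disparity and imperfect rectification better, which is presumably why the test exercises both.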
Example #29
0
def DetectBoundaryEdges(origin_frame, bounded_lines, max_min_lines,
                        scale_threshold=1.0, line_perc=2.0/3,
                        filtrate_edge_points=True, draw=False,
                        print_hough_positions=False):
    '''
	 @brief Detect all boundary edges, and compute the corresponding line.
	 	Step 1:
	 		For each line in (max_hor_line, min_hor_line, max_vert_line, min_vert_line):
	 			detect if boundary indicates an open space towards the frame edges, indicating end of the blade.
	 	Step 2:
	 		If any max/min boundary line indicates end of blade (detected end of blade region), then 
	 		find all blade edge points by using DetectLineEdge, starting from each horizontal/vertical max/min point and towards the frame end point.
	 	Step 3:
	 		Use hough transform to derive the longest and most significant line for each detected blade edge.

	 @param origin_frame (Original grayscale frame without laser points)
	 @param bounded_lines (bounded lines from FindLineLimits)
	 @param max_min_lines (List of max min horizontal and vertical lines, in this order:
		 - max_hor_line (max_hor_line from FindLineLimits)
		 - min_hor_line (min_hor_line from FindLineLimits)
		 - max_vert_line (max_vert_line from FindLineLimits)
		 - min_vert_line (min_vert_lines from FindLineLimits))
	 @param scale_threshold (scaling threshold for considering one of the limit lines as a possible blade edge - 
	 	set between 0 -> 1 (float), where 1 = 100 percent of the frame width or height. 
	 	100 percent means that all boundary lines are considered as blade edges.)
	 @param line_perc (Set how many percent of the line to use for detecting the edge. Float between 0 -> 1. (default=2.0/3))
	 @param filtrate_edge_points (Filtrate detected edge points that deviate outside of standard deviation.)
	 @param draw (draw hough lines and points (used during testing))
	 @param print_hough_positions (print hough line positions (rho, theta) (default=False))


	 @return edgel_frame, hough_lines 
	 	(Return:
	 		edgel_frame = edge map
	 		hough_lines = [max_hor_hough_line, min_hor_hough_line, max_vert_hough_line, min_vert_hough_line],
	 			each index given as a line of (rho, theta))
	'''
    # Note: GetShape returns (height, width) (see its other call sites), so the
    # 'width' name below actually holds the vertical extent. The original naming
    # is kept to preserve behaviour; the thresholds stay consistent with how the
    # coordinates are compared further down.
    width, height = GetShape(origin_frame)
    edgel_frame = Canny(origin_frame)
    hor_edge_region_threshold = width * scale_threshold
    vert_edge_region_threshold = height * scale_threshold

    max_hor_line = max_min_lines[0]
    min_hor_line = max_min_lines[1]
    max_vert_line = max_min_lines[2]
    min_vert_line = max_min_lines[3]

    max_hor_line_edge_points = [[], []]  # [x], [y]
    max_hor_hough_line = (None, None)
    average_line_x_pos = max_hor_line[0][1] + (max_hor_line[1][1] - max_hor_line[0][1]) / 2
    if average_line_x_pos < hor_edge_region_threshold:  # Edge detected on the bottom.
        for vert_line in bounded_lines[1]:  # Check all vertical lines (max points).
            len_line = int(round((vert_line[1][1] - vert_line[0][1]) * line_perc))  # Computed but unused in the original code.
            max_point = vert_line[1]
            y_edge_ind = DetectLineEdge(edgel_frame, False, max_point[0], max_point[1], width - 1)
            max_hor_line_edge_points[0].append(max_point[0])
            max_hor_line_edge_points[1].append(y_edge_ind)
        if filtrate_edge_points:
            max_hor_line_edge_points[0], max_hor_line_edge_points[1] = FiltrateEdgePoints(
                max_hor_line_edge_points[0], max_hor_line_edge_points[1])
        max_hor_hough_line = HoughLineEdgePoints(edgel_frame, max_hor_line_edge_points, False)

    min_hor_line_edge_points = [[], []]  # [x], [y]
    min_hor_hough_line = (None, None)
    average_line_x_pos = min_hor_line[0][1] + (min_hor_line[1][1] - min_hor_line[0][1]) / 2
    if average_line_x_pos > width - hor_edge_region_threshold:  # Edge detected on top.
        for vert_line in bounded_lines[1]:  # Check all vertical lines (min points).
            len_line = int(round((vert_line[1][1] - vert_line[0][1]) * line_perc))
            min_point = vert_line[0]
            y_edge_ind = DetectLineEdge(edgel_frame, False, min_point[0], min_point[1], 0)
            min_hor_line_edge_points[0].append(min_point[0])
            min_hor_line_edge_points[1].append(y_edge_ind)
        if filtrate_edge_points:
            min_hor_line_edge_points[0], min_hor_line_edge_points[1] = FiltrateEdgePoints(
                min_hor_line_edge_points[0], min_hor_line_edge_points[1])
        min_hor_hough_line = HoughLineEdgePoints(edgel_frame, min_hor_line_edge_points, False)

    max_vert_line_edge_points = [[], []]  # [x], [y]
    max_vert_hough_line = (None, None)
    average_line_y_pos = max_vert_line[0][0] + (max_vert_line[1][0] - max_vert_line[0][0]) / 2
    if average_line_y_pos < vert_edge_region_threshold:  # Edge detected to the right.
        for hor_line in bounded_lines[0]:  # Check all horizontal lines (max points).
            len_line = int(round((hor_line[1][0] - hor_line[0][0]) * line_perc))
            max_point = hor_line[1]
            x_edge_ind = DetectLineEdge(edgel_frame, True, max_point[1], max_point[0], height - 1)
            max_vert_line_edge_points[0].append(x_edge_ind)
            max_vert_line_edge_points[1].append(max_point[1])
        if filtrate_edge_points:
            max_vert_line_edge_points[1], max_vert_line_edge_points[0] = FiltrateEdgePoints(
                max_vert_line_edge_points[1], max_vert_line_edge_points[0])
        max_vert_hough_line = HoughLineEdgePoints(edgel_frame, max_vert_line_edge_points, True)

    min_vert_line_edge_points = [[], []]  # [x], [y]
    min_vert_hough_line = (None, None)
    average_line_y_pos = min_vert_line[0][0] + (min_vert_line[1][0] - min_vert_line[0][0]) / 2
    if average_line_y_pos > height - vert_edge_region_threshold:  # Edge detected to the left.
        for hor_line in bounded_lines[0]:  # Check all horizontal lines (min points).
            len_line = int(round((hor_line[1][0] - hor_line[0][0]) * line_perc))
            min_point = hor_line[0]
            x_edge_ind = DetectLineEdge(edgel_frame, True, min_point[1], min_point[0], 0)
            min_vert_line_edge_points[0].append(x_edge_ind)
            min_vert_line_edge_points[1].append(min_point[1])
        if filtrate_edge_points:
            min_vert_line_edge_points[1], min_vert_line_edge_points[0] = FiltrateEdgePoints(
                min_vert_line_edge_points[1], min_vert_line_edge_points[0])
        min_vert_hough_line = HoughLineEdgePoints(edgel_frame, min_vert_line_edge_points, True)

    if (max_hor_hough_line[0] is None or min_hor_hough_line[0] is None
            or max_vert_hough_line[0] is None or min_vert_hough_line[0] is None):
        raise DroneVisionError('detect_boundary_edge_not_found_all_edge_lines')

    hough_lines = [
        max_hor_hough_line, min_hor_hough_line, max_vert_hough_line,
        min_vert_hough_line
    ]
    if draw:
        for i in range(len(max_hor_line_edge_points[0])):
            if i == 0:
                color = (0, 0, 255)  # DARK BLUE
                edgel_frame = DrawHoughLine(edgel_frame, max_hor_hough_line, color)
            x = max_hor_line_edge_points[0][i]
            y = max_hor_line_edge_points[1][i]
            cv2.circle(edgel_frame, (x, y), 5, color, -1)
        for i in range(len(min_hor_line_edge_points[0])):
            if i == 0:
                color = (0, 255, 255)  # LIGHT BLUE
                edgel_frame = DrawHoughLine(edgel_frame, min_hor_hough_line, color)
            x = min_hor_line_edge_points[0][i]
            y = min_hor_line_edge_points[1][i]
            cv2.circle(edgel_frame, (x, y), 5, color, -1)
        for i in range(len(max_vert_line_edge_points[0])):
            if i == 0:
                color = (204, 0, 204)  # PURPLE
                edgel_frame = DrawHoughLine(edgel_frame, max_vert_hough_line, color)
            x = max_vert_line_edge_points[0][i]
            y = max_vert_line_edge_points[1][i]
            cv2.circle(edgel_frame, (x, y), 5, color, -1)
        for i in range(len(min_vert_line_edge_points[0])):
            if i == 0:
                color = (0, 255, 0)  # GREEN
                edgel_frame = DrawHoughLine(edgel_frame, min_vert_hough_line, color)
            x = min_vert_line_edge_points[0][i]
            y = min_vert_line_edge_points[1][i]
            cv2.circle(edgel_frame, (x, y), 5, color, -1)

    if print_hough_positions:
        print 'max_hor_hough_line: ', max_hor_hough_line
        print 'min_hor_hough_line: ', min_hor_hough_line
        print 'max_vert_hough_line: ', max_vert_hough_line
        print 'min_vert_hough_line: ', min_vert_hough_line

    # hough_lines = [max_hor_hough_line, min_hor_hough_line, max_vert_hough_line, min_vert_hough_line]
    return edgel_frame, hough_lines
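The hough_lines returned above are (rho, theta) pairs in the normal-form parameterisation x*cos(theta) + y*sin(theta) = rho. Below is a minimal sketch of how such a line can be rendered with OpenCV; it stands in for the project's DrawHoughLine helper, whose implementation is not shown here:

import numpy as np
import cv2

def draw_rho_theta_line(frame, rho, theta, color=(0, 0, 255), thickness=2):
    # A (rho, theta) pair describes the line x*cos(theta) + y*sin(theta) = rho.
    a, b = np.cos(theta), np.sin(theta)
    x0, y0 = a * rho, b * rho  # Closest point on the line to the origin.
    # Extend far in both directions along the line's direction vector (-b, a).
    pt1 = (int(x0 + 2000 * (-b)), int(y0 + 2000 * a))
    pt2 = (int(x0 - 2000 * (-b)), int(y0 - 2000 * a))
    cv2.line(frame, pt1, pt2, color, thickness)
    return frame

canvas = np.zeros((480, 640, 3), dtype=np.uint8)
canvas = draw_rho_theta_line(canvas, rho=200.0, theta=np.pi / 4)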