Example #1
	def test_PyQtGraphImageMultipleImages(self):
		'''
		 @brief Test PyQtImage real-time plot with multiple images
		'''
		from src.DroneVision.DroneVision_src.hardware.imageTools import GetImage
		from src.DroneVision.DroneVision_src.imgProcessing.frameTools.frameTools import CheckColor, GetShape
		import time, cv2, timeit
		from getpass import getpass

		for folder, left_frames, right_frames, actual_distances, baselines, use_set in self.GetFrameSets():
			if use_set:
				self.pqImage = self.PyQtImage.PyQtImage(initialize=True, title='Test real-time multiple plot')
				timer = timeit.default_timer()
				reset = True
				for i in range(len(left_frames)):
					frame_l 	= GetImage(folder+left_frames[i][0])
					frame_l_sl 	= GetImage(folder+left_frames[i][1])
					frame_r 	= GetImage(folder+right_frames[i][0])
					frame_r_sl 	= GetImage(folder+right_frames[i][1])
					frame_l 		= CheckColor(frame_l)
					width, height 	= GetShape(frame_l)
					cv2.line(frame_l, (0,0), (height,width), (255,255,0), 10)
					touple_frames = [('frame_l', frame_l), ('frame_l_sl', frame_l_sl), ('frame_r', frame_r), ('frame_r_sl', frame_r_sl)]
					self.pqImage(touple_frames, rotate=True)
					if not(self.CheckAllTests()):
						getpass('Press enter to continue to next frame..')
					if reset:
						self.pqImage(reset=True)
						reset = False
					else:
						reset = True
				print 'Delay for processing {0} image sets was: {1} sec'.format(len(left_frames), timeit.default_timer()-timer)
				self.pqImage.ClosePQWindow()
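Example #1 draws a diagonal with cv2.line after reading the frame shape. For reference, here is a minimal plain-OpenCV/numpy sketch of that drawing step (it does not use the project's GetShape helper, and the output path is illustrative): numpy image shapes are (height, width) while cv2.line points are (x, y) = (column, row), so the two orderings are easy to mix up.

import numpy as np
import cv2

# Stand-in frame: numpy shape is (rows, cols) = (height, width).
frame = np.zeros((480, 640, 3), dtype=np.uint8)
height, width = frame.shape[:2]
# cv2.line takes points as (x, y) = (column, row), so the far corner is (width-1, height-1).
cv2.line(frame, (0, 0), (width - 1, height - 1), (255, 255, 0), 10)
cv2.imwrite('diagonal.png', frame)      # illustrative output path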
Example #2
    def TestHeading(self, folder, fn_frame, fn_slframe, draw=True):
        '''
		 @brief Test function for Heading unit.

		 @param folder Input folder 
		 @param fn_frame Frame filename without points.
		 @param fn_slframe Frame filename with points.
		 @param draw (default=True; False tests processing speed and shows only the heading in the frame)
		'''
        import timeit, warnings
        import numpy as np
        from src.DroneVision.DroneVision_src.hardware.imageTools import GetImage, MatplotShow
        from Settings.Exceptions import DroneVisionError

        print '\n'
        print '#----------- TESTING HEADING PROCESSING   \t---------------#'
        print '#----------- Image without points: {0} \t---------------#'.format(
            fn_frame)
        print '#----------- Image with points: {0}    \t---------------#'.format(
            fn_slframe)

        settings_inst = self.Settings.Settings()

        fn_frame = folder + fn_frame
        fn_slframe = folder + fn_slframe

        delay = timeit.default_timer()
        frame = GetImage(fn_frame)
        sl_frame = GetImage(fn_slframe)
        print 'Delay reading images: {0} sec'.format(timeit.default_timer() -
                                                     delay)

        heading = self.Heading.Heading(
            settings_inst.GetSettings('CV', 'rho_step_distance'),
            settings_inst.GetSettings('CV', 'rho_min_diag_perc'))

        pointDet = self.PointDetection.PointDetection(
            True, settings_inst.GetSettings())
        pointDet.CalibratePointDetection()
        print 'Min distance between blobs: {0}'.format(
            pointDet.GetMinDistanceBetweenBlobs())

        total_delay = timeit.default_timer()

        delay = timeit.default_timer()
        try:
            delta_frame, point_kp, blob_desc, frame_un, sl_frame_un = pointDet.GetPointList(
                frame, sl_frame, draw=draw)
        except DroneVisionError, err:
            warnings.simplefilter('always')
            warnings.warn(str(err), Warning)
            warnings.simplefilter('default')
            return
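The except block above converts a DroneVisionError into a warning that is always displayed before the test returns. A generic sketch of that simplefilter pattern, independent of the project's exception classes:

import warnings

def report_nonfatal(err):
    # Temporarily force warnings to always be shown, emit one, then restore the default filter.
    warnings.simplefilter('always')
    warnings.warn(str(err), Warning)
    warnings.simplefilter('default')

report_nonfatal(ValueError('example failure that should not abort the test'))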
Example #3
	def TestBlobDetection(self, folder, fn_frame, fn_slframe):
		'''
		 @brief Test function for blob detection unit.

		 @param folder Input folder 
		 @param fn_frame Frame filename without points.
		 @param fn_slframe Frame filename with points.
		'''
		import timeit, warnings
		import numpy as np
		from src.DroneVision.DroneVision_src.hardware.imageTools import GetImage, MatplotShow
		from src.DroneVision.DroneVision_src.imgProcessing.featureDetection.PointDetection import PointDetection
		from Settings.Exceptions import DroneVisionError

		print '\n'
		print '#----------- TESTING BLOB DETECTION   \t---------------#'
		print '#----------- Image without points: {0} \t---------------#'.format(fn_frame)
		print '#----------- Image with points: {0}    \t---------------#'.format(fn_slframe)

		settings_inst 	= self.Settings.Settings()

		fn_frame 	= folder + fn_frame
		fn_slframe 	= folder + fn_slframe

		delay = timeit.default_timer()
		frame 		= GetImage(fn_frame)
		sl_frame 	= GetImage(fn_slframe)
		print 'Delay reading images: {0} sec'.format(timeit.default_timer() - delay)

		# Detector types: 0,1,2,3 - simple blob detector, ORB, SIFT, SURF
		detector_type_titles = ['Simple Blob Detector', 'ORB Detector', 'SIFT Detector', 'SURF Detector']
		feature_point_frames = []
		for detector_type in range(len(detector_type_titles)):

			settings_inst.ChangeSetting('CV', 'detector_type', detector_type)
			pointDet 	= PointDetection.PointDetection(True, settings_inst.GetSettings())
			pointDet.CalibratePointDetection()
			print '\n\nMin distance between blobs: {0}'.format(pointDet.GetMinDistanceBetweenBlobs())

			try:
				total_delay = timeit.default_timer()
				delta_frame, point_kp, blob_desc, frame_un, sl_frame_un = pointDet.GetPointList(frame, sl_frame, draw=False)
				timeout = timeit.default_timer() - total_delay
				print 'Total delay for downscaling + undistort + blob using {0}: {1} sec'.format(detector_type_titles[detector_type], timeout)
			except DroneVisionError, err:
				warnings.simplefilter('always')
				warnings.warn(str(err)+' - Detector type: {0}'.format(detector_type_titles[detector_type]), Warning)
				warnings.simplefilter('default')
				continue

			delta_frame_drawn, point_kp, blob_desc, frame_un, sl_frame_un = pointDet.GetPointList(frame, sl_frame, draw=True)
			feature_point_frames.append((detector_type_titles[detector_type], delta_frame_drawn))
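Example #3 sweeps detector_type over the simple blob detector, ORB, SIFT and SURF. How PointDetection maps that setting to a detector is not shown here; as a hedged reference, creating the corresponding plain-OpenCV detectors looks roughly like the sketch below (constructor names vary between OpenCV versions, and the random noise frame is only a stand-in for a structured-light image).

import cv2
import numpy as np

blob = cv2.SimpleBlobDetector_create()      # detector_type 0
orb = cv2.ORB_create()                      # detector_type 1
# sift = cv2.SIFT_create()                  # detector_type 2 (OpenCV >= 4.4)
# surf = cv2.xfeatures2d.SURF_create()      # detector_type 3 (contrib build)

gray = np.random.randint(0, 256, (480, 640), dtype=np.uint8)   # stand-in frame
keypoints = blob.detect(gray)                          # keypoints only
kp, descriptors = orb.detectAndCompute(gray, None)     # keypoints + descriptors
print('blob keypoints: {0}, ORB keypoints: {1}'.format(len(keypoints), len(kp)))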
Example #4
    def TestPointDetection(self, folder, fn_frame, fn_slframe):
        '''
		 @brief Test function for point detection unit.

		 @param folder Input folder 
		 @param fn_frame Frame filename without points.
		 @param fn_slframe Frame filename with points.
		'''
        import timeit, warnings
        import numpy as np
        from src.DroneVision.DroneVision_src.hardware.imageTools import GetImage, MatplotShow
        from Settings.Exceptions import DroneVisionError

        print '\n'
        print '#----------- TESTING POINT DETECTION   \t---------------#'
        print '#----------- Image without points: {0} \t---------------#'.format(
            fn_frame)
        print '#----------- Image with points: {0}    \t---------------#'.format(
            fn_slframe)

        settings_inst = self.Settings.Settings()

        fn_frame = folder + fn_frame
        fn_slframe = folder + fn_slframe

        delay = timeit.default_timer()
        frame = GetImage(fn_frame)
        sl_frame = GetImage(fn_slframe)
        print 'Delay reading images: {0} sec'.format(timeit.default_timer() -
                                                     delay)

        pointDet = self.PointDetection.PointDetection(
            True, settings_inst.GetSettings())
        pointDet.CalibratePointDetection()
        print 'Min distance between blobs: {0}'.format(
            pointDet.GetMinDistanceBetweenBlobs())

        total_delay = timeit.default_timer()

        delay = timeit.default_timer()
        try:
            delta_frame, point_kp, blob_desc, frame_un, sl_frame_un = pointDet.GetPointList(
                frame, sl_frame, draw=False)
        except DroneVisionError, err:
            warnings.simplefilter('always')
            warnings.warn(str(err), Warning)
            warnings.simplefilter('default')
            return
Example #5
    def CalibrateCamera(self, frame_size=None):
        '''
		 @brief Compute the camera matrix, distortion coefficients, rotation and translation vectors.

		 @param frame_size (height, width)
		'''
        if not (isinstance(frame_size, tuple)) and not (isinstance(
                frame_size, list)):
            frame_size = GetShape(GetImage(self.__calib_img_fnames[0]))
        if self.__focal_length != None and self.__sensor_size != None:
            #flags 	= cv2.CALIB_USE_INTRINSIC_GUESS | cv2.CALIB_FIX_ASPECT_RATIO
            flags = cv2.CALIB_USE_INTRINSIC_GUESS | cv2.CALIB_FIX_PRINCIPAL_POINT
            self.__calib_params['intrinsic_mtx'] = self.ComputeCameraMatrix(
                frame_size, self.__focal_length, self.__sensor_size)
            retval, self.__calib_params['intrinsic_mtx'], self.__calib_params[
                'distortion_coeffs'], rvecs, tvecs = cv2.calibrateCamera(
                    self.__calib_params['objpoints'],
                    self.__calib_params['imgpoints'],
                    (frame_size[1], frame_size[0]),
                    self.__calib_params['intrinsic_mtx'],
                    None,
                    flags=flags)
        else:
            retval, self.__calib_params['intrinsic_mtx'], self.__calib_params[
                'distortion_coeffs'], rvecs, tvecs = cv2.calibrateCamera(
                    self.__calib_params['objpoints'],
                    self.__calib_params['imgpoints'],
                    (frame_size[1], frame_size[0]), None, None)
        if not (retval):
            raise Exception('Camera calibration failed!')
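Example #5 seeds cv2.calibrateCamera with an intrinsic guess (CALIB_USE_INTRINSIC_GUESS) when the focal length and sensor size are known. The project's ComputeCameraMatrix is not shown, but a standard pinhole-model guess, with purely illustrative numbers, can be built like this:

import numpy as np

def intrinsic_guess(frame_size, focal_length_mm, sensor_size_mm):
    # frame_size is (height, width) as in the docstring above.
    height, width = frame_size
    sensor_h_mm, sensor_w_mm = sensor_size_mm
    fx = focal_length_mm * width / float(sensor_w_mm)    # focal length in pixels
    fy = focal_length_mm * height / float(sensor_h_mm)
    cx, cy = width / 2.0, height / 2.0                   # assume a centred principal point
    return np.array([[fx, 0.0, cx],
                     [0.0, fy, cy],
                     [0.0, 0.0, 1.0]])

print(intrinsic_guess((480, 640), 3.6, (2.7, 3.6)))      # illustrative camera values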
Example #6
    def FindChessBoardCorners(self):
        '''
		 @brief Find chessboard matches and append to imgpoints.
		'''
        if self.__show_chessboard_img:
            realTimePlot = self.GetNewRealTimePlot()

        ret_calib = False
        for i in range(len(self.__calib_img_fnames)):
            fname = self.__calib_img_fnames[i]
            img = GetImage(fname, gray=False)
            gray = CheckGrayScale(img)
            gray_shape = GetShape(gray)
            if i > 0:
                if not (self.CheckIntrinsicScale(gray_shape)):
                    raise ValueError(
                        'Calibration image dimensions do not match!')
            else:
                self.SetScale(gray_shape)

            # Find the chess board corners
            flags = cv2.CALIB_CB_ADAPTIVE_THRESH | cv2.CALIB_CB_NORMALIZE_IMAGE | cv2.CALIB_CB_FAST_CHECK
            ret, corners = cv2.findChessboardCorners(
                gray, (self.__calib_chess_rows, self.__calib_chess_columns),
                flags=flags)

            # If found, add object points, image points (after refining them)
            print_msg = '{0}/{1} images processed'.format(
                i + 1, len(self.__calib_img_fnames))
            if ret:
                self.PrintCalibrationProcess(
                    print_msg + ' - {0} was successful'.format(fname))
                ret_calib = True
                self.__calib_params['objpoints'].append(self.__objp)

                corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                                           self.__criteria)
                self.__calib_params['imgpoints'].append(corners)

                if self.__show_chessboard_img:
                    cv2.drawChessboardCorners(
                        img,
                        (self.__calib_chess_rows, self.__calib_chess_columns),
                        corners, ret)
                    realTimePlot(reset=True)
                    realTimePlot([(fname[len(self.__calib_folder):], img)],
                                 'calibration_frames')
            else:
                self.PrintCalibrationProcess(
                    print_msg + ' - {0} was not successful'.format(fname))
                if self.__show_chessboard_img:
                    realTimePlot(reset=True)
                    realTimePlot([
                        ('Failed: ' + fname[len(self.__calib_folder):], gray)
                    ], 'calibration_frames')

        if not (ret_calib):
            err_msg = 'None of the chessboard calibration frames could be used for calibration. Folder = {0}'.format(
                self.__calib_folder)
            raise Exception(err_msg)
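Example #6 collects chessboard correspondences with cv2.findChessboardCorners and refines them with cv2.cornerSubPix before Example #5 runs the calibration. A generic, self-contained version of that flow (the board size and image folder are placeholders, not the project's settings):

import glob
import cv2
import numpy as np

rows, cols = 6, 9                                   # inner corners of the board (placeholder)
objp = np.zeros((rows * cols, 3), np.float32)
objp[:, :2] = np.mgrid[0:cols, 0:rows].T.reshape(-1, 2)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

objpoints, imgpoints = [], []
for fname in glob.glob('calib_images/*.png'):       # placeholder folder
    gray = cv2.imread(fname, cv2.IMREAD_GRAYSCALE)
    found, corners = cv2.findChessboardCorners(
        gray, (cols, rows),
        flags=cv2.CALIB_CB_ADAPTIVE_THRESH | cv2.CALIB_CB_NORMALIZE_IMAGE)
    if found:
        corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        objpoints.append(objp)
        imgpoints.append(corners)

if objpoints:
    # Note: retval is the RMS reprojection error, not a success flag.
    retval, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, gray.shape[::-1], None, None)
    print('RMS reprojection error: {0}'.format(retval))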
Example #7
    def ShowTestCalibImage(self):
        '''
		 @brief Show test image in plot
		'''
        if self.__show_chessboard_img:
            touple_frames = []
            if self.__me_master:
                side_txt = 'left'
                test_img_fname = self.__leftCameraCalibration.GetDistorionCalibImages(
                )[0]
            else:
                side_txt = 'right'
                test_img_fname = self.__rightCameraCalibration.GetDistorionCalibImages(
                )[0]

            test_img = GetImage(test_img_fname)

            headline = '[{0}] before shape {1}'.format(side_txt,
                                                       test_img.shape)
            touple_frames.append((headline, test_img))

            test_und_img = self.Undistort(test_img)
            headline = '[{0}] After undistort shape {1}'.format(
                side_txt, test_und_img.shape)
            touple_frames.append((headline, test_und_img))

            realTimePlot = self.GetNewRealTimePlot()
            realTimePlot(touple_frames, 'calibration_result')
Example #8
    def test_CameraCalibration(self):
        '''
		 @brief Test camera calibration unit by undistorting chess board images.
		'''
        from src.DroneVision.DroneVision_src.imgProcessing.frameTools.frameTools import PyrDown, GetShape
        from src.DroneVision.DroneVision_src.hardware.imageTools import GetImage, MatplotShow, RealTimePlot

        settings_inst = self.Settings.Settings()
        settings_inst.ChangeSetting('BASIC', 'reset_calibration', True)
        settings_inst.ChangeSetting('CALIB', 'calib_show_imgs',
                                    not (self.CheckAllTests()))
        settings_inst.ChangeSetting('CALIB', 'calib_print_process', True)
        settings_inst.ChangeSetting('CALIB', 'calib_show_imgs', True)
        use_PyQt = False
        realTimePlot = RealTimePlot(interactive_mode=False)
        #realTimePlot = None
        left_calib = self.CameraCalibration.CameraCalibration(
            settings_inst.GetSettings('CALIB'),
            settings_inst.GetSettings('CALIB', 'calib_img_folder_left_cam'),
            settings_inst.GetSettings('CALIB', 'calib_save_fname_left_cam'),
            settings_inst.GetSettings('BASIC', 'reset_calibration'),
            plot_figure=realTimePlot,
            use_PyQt=use_PyQt)
        right_calib = self.CameraCalibration.CameraCalibration(
            settings_inst.GetSettings('CALIB'),
            settings_inst.GetSettings('CALIB', 'calib_img_folder_right_cam'),
            settings_inst.GetSettings('CALIB', 'calib_save_fname_right_cam'),
            settings_inst.GetSettings('BASIC', 'reset_calibration'),
            plot_figure=realTimePlot,
            use_PyQt=use_PyQt)
        calibs = [(left_calib, 'Left: '), (right_calib, 'Right: ')]

        touple_frames = []
        for i in range(len(calibs)):
            calib, calib_name = calibs[i]
            calib.CalibrateCameraDistortion()
            calib.ShowTestCalibImage()

            test_img_fname = calib.GetDistorionCalibImages()[0]
            test_img = GetImage(test_img_fname)
            test_img = PyrDown(
                test_img,
                settings_inst.GetSettings('CV',
                                          'default_downsampling_divisor'),
                settings_inst.GetSettings('CV', 'desired_frame_shape'))

            headline = calib_name + 'before shape {0}'.format(test_img.shape)
            touple_frames.append((headline, test_img))

            test_und_img = calib.Undistort(test_img)
            headline = calib_name + 'After undistort shape {0}'.format(
                test_und_img.shape)
            touple_frames.append((headline, test_und_img))

        if not (self.CheckAllTests()):
            MatplotShow(touple_frames,
                        test_img_fname + '_Camera_calibration_test',
                        save_fig=self.save_figs,
                        save_fig_only=self.save_figs_only)
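Example #8 verifies the calibration by undistorting a test image through the project's Undistort wrapper. The plain-OpenCV equivalent, with illustrative intrinsics and a synthetic frame in place of real data, is roughly:

import cv2
import numpy as np

# Illustrative intrinsics/distortion; real values come from cv2.calibrateCamera.
mtx = np.array([[700.0, 0.0, 320.0],
                [0.0, 700.0, 240.0],
                [0.0, 0.0, 1.0]])
dist = np.zeros(5)
img = np.zeros((480, 640, 3), dtype=np.uint8)       # synthetic stand-in frame

h, w = img.shape[:2]
new_mtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
und = cv2.undistort(img, mtx, dist, None, new_mtx)
x, y, rw, rh = roi
und = und[y:y + rh, x:x + rw]                       # crop to the valid region
print('undistorted shape: {0}'.format(und.shape))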
Example #9
    def CalibrateStandardScaling(self, printInfo=False):
        '''
		 @brief Run standard scale calibration.

		 @param printInfo (Print info during calibration (default=False))
		'''
        self.SetScalingImages()

        standard_scale_distances = []
        mean_point_sizes = []
        for sl_fname, fname in self.__calib_img_fnames:
            frame = GetImage(fname, gray=False)
            sl_frame = GetImage(sl_fname, gray=False)
            try:
                delta_frame, keypoints, descriptors, frame, sl_frame = self.GetPointList(
                    frame,
                    sl_frame,
                    concatenate_points=True,
                    compute_descriptors=False)
            except DroneVisionError, err:
                warnings.simplefilter('always')
                warnings.warn(
                    str(err) +
                    ' - file: {0}, SL file: {1}'.format(fname, sl_fname),
                    Warning)
                warnings.simplefilter('default')
                continue
            width, height = GetShape(sl_frame)
            blockSize = int(
                math.sqrt(math.pow(width, 2.0) + math.pow(height, 2.0)) // 2)
            scaling_point_frame, mean_point_size = self.PointScaleMatch(
                delta_frame, keypoints, blockSize)
            standard_scale_distance = self.ComputeStandardScaleDistance(
                scaling_point_frame)
            if np.isnan(standard_scale_distance):
                err = 'No scaling points detected - file: {0}, SL file: {1}'.format(
                    fname, sl_fname)
                warnings.simplefilter('always')
                warnings.warn(str(err), Warning)
                warnings.simplefilter('default')
                continue

            standard_scale_distances.append(standard_scale_distance)
            mean_point_sizes.append(mean_point_size)
Example #10
    def ShowTestCalibImage(self):
        '''
		 @brief Show test image in plot
		'''
        if self.__show_chessboard_img:
            touple_frames = []
            test_img_fname = self.GetDistorionCalibImages()[0]
            test_img = GetImage(test_img_fname)

            headline = 'Before shape {0}'.format(test_img.shape)
            touple_frames.append((headline, test_img))

            test_und_img = self.Undistort(test_img)
            headline = 'After undistort shape {0}'.format(test_und_img.shape)
            touple_frames.append((headline, test_und_img))

            realTimePlot = self.GetNewRealTimePlot()
            realTimePlot(touple_frames, 'calibration_result')
Example #11
	def test_PyQtGraphImageSingleImage(self):
		'''
		 @brief Test PyQtImage real-time plot with single image
		'''
		from src.DroneVision.DroneVision_src.hardware.imageTools import GetImage
		import time, timeit
		from getpass import getpass

		for folder, left_frames, right_frames, actual_distances, baselines, use_set in self.GetFrameSets():
			if use_set:
				self.pqImage = self.PyQtImage.PyQtImage(initialize=True, title='Test real-time single plot')
				timer = timeit.default_timer()
				for left_frame in left_frames:
					input_image_fn = left_frame[0]
					frame = GetImage(folder+input_image_fn)
					self.pqImage.UpdatePQImage(frame, 'left_frame', rotate=True)
					#if not(self.CheckAllTests()):
					#	getpass('Press enter to continue to next frame..')
				print 'Delay for processing {0} images was: {1} sec'.format(len(left_frames), timeit.default_timer()-timer)
				self.pqImage.ClosePQWindow()
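Example #11 streams frames through the project's PyQtImage wrapper, which is built on PyQtGraph. A minimal plain-pyqtgraph sketch of the same idea, independent of that wrapper and using a synthetic frame source:

import numpy as np
import pyqtgraph as pg

app = pg.mkQApp()               # create the Qt application
view = pg.ImageView()           # image display widget
view.show()
for _ in range(20):
    frame = np.random.randint(0, 256, (480, 640), dtype=np.uint8)
    view.setImage(frame.T)      # pyqtgraph uses (x, y) ordering, hence the transpose
    app.processEvents()         # let the GUI update between frames
view.close()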
Example #12
    def TestRecordFrames(self, input_image_fn):
        '''
		 @brief Test unit for RecordFrames

		 @param input_image_fn
		'''
        import time
        from src.bin.tools import CheckDir
        CheckDir(self.vid_rec_folder)
        from src.DroneVision.DroneVision_src.hardware.imageTools import GetImage
        from src.DroneVision.DroneVision_src.hardware.VideoLink import VideoLink
        video_recorder = self.RecordFrames.RecordFrames(
            self.vid_rec_fps, self.vid_rec_folder, self.video_rec_output_fname)
        while video_recorder.GetNumberOfRecordedFrames() < self.max_rec_frames:
            frame = GetImage(input_image_fn)
            #video_recorder.WriteFrameThread(frame)
            #time.sleep(0.5)
            video_recorder.WriteFrame(frame)
            print 'N recorded frames: ', video_recorder.GetNumberOfRecordedFrames(
            )
        video_recorder.CloseRecording()
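TestRecordFrames drives the project's RecordFrames recorder, whose internals are not shown; a generic OpenCV recording loop with cv2.VideoWriter conveys the same pattern (codec, fps, frame size and output path are illustrative):

import cv2
import numpy as np

fps, size = 25, (640, 480)                          # frameSize is (width, height)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
writer = cv2.VideoWriter('recording.avi', fourcc, fps, size)
for _ in range(50):
    frame = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
    writer.write(frame)
writer.release()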
Example #13
	def test_CameraLink(self):
		'''
		 @brief Test Ptg camera link by receiving frames and saving them.
		'''
		if not(self.CheckAllTests()): # Trigger test covers the same manually
			return

		import timeit
		from src.DroneVision.DroneVision_src.hardware.imageTools import WriteImage, GetImage
		from src.bin.tools import GetTimestamp, CheckDir

		##### SETUP FOLDERS ######
		settings_inst 		= self.Settings.Settings()
		settings_inst.ChangeSetting('CAMERA', 'manual_triggering', False)
		test_frame_folder 	= settings_inst.GetSettings('DATABASE', 'output_folder') + '/TEST/' + GetTimestamp() + '/'
		test_frame_folder 	+= 'left_test_camera/'
		CheckDir(test_frame_folder)
		##########################

		cameralink = self.CameraLink.CameraLink(True, settings_inst.GetSettings('CAMERA'), settings_inst.GetSettings('LASER'))
		start_time = timeit.default_timer()
		raise_error = False
		i = 0
		try:
			while timeit.default_timer() - start_time < 5:
				frame, sl_frame = cameralink.GetFrame()
				print frame.shape
				fps, width, height  = cameralink.GetFrameProperties()
				total_frames 		= cameralink.GetTotalFrames()
				frame_n 			= cameralink.GetFrameNumber()
				print 'GET: ', fps, width, height, total_frames, frame_n
				WriteImage(frame, test_frame_folder + 'frame_' + str(frame_n))
				frame = GetImage(test_frame_folder + 'frame_' + str(frame_n) + '.tif')
				i += 1
		except:
			raise_error = True
		cameralink.StopCamera()
		if raise_error:
			raise
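CameraLink wraps the project's Point Grey camera interface and is not reproduced here; a generic timed grab loop with cv2.VideoCapture shows the same pattern of grabbing, inspecting and releasing frames:

import timeit
import cv2

cap = cv2.VideoCapture(0)                           # first available camera
start = timeit.default_timer()
while cap.isOpened() and timeit.default_timer() - start < 5:
    ok, frame = cap.read()
    if not ok:
        break
    print('fps: {0}, shape: {1}'.format(cap.get(cv2.CAP_PROP_FPS), frame.shape))
cap.release()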
Example #14
	def test_StereoCalibration(self):
		'''
		 @brief Test function for the stereo calibration unit
		'''
		from src.DroneVision.DroneVision_src.imgProcessing.frameTools.frameTools import PyrDown
		from src.DroneVision.DroneVision_src.hardware.imageTools import GetImage, MatplotShow

		settings_inst = self.Settings.Settings()
		settings_inst.ChangeSetting('BASIC', 'reset_calibration', True)
		settings_inst.ChangeSetting('CALIB', 'calib_show_imgs', not(self.CheckAllTests()))
		settings_inst.ChangeSetting('CALIB', 'calib_print_process', True)
		settings_inst.ChangeSetting('CALIB', 'calib_show_imgs', False)
		stereo_calib = self.StereoCalibration.StereoCalibration(True, settings_inst.GetSettings('CALIB'), settings_inst.GetSettings('BASIC', 'reset_calibration'))
		stereo_calib.CalibrateStereoVisionSystem()

		# Try reloading save parameters
		settings_inst.ChangeSetting('BASIC', 'reset_calibration', False)
		stereo_calib = self.StereoCalibration.StereoCalibration(True, settings_inst.GetSettings('CALIB'), settings_inst.GetSettings('BASIC', 'reset_calibration'))
		stereo_calib.CalibrateStereoVisionSystem()
		calibs = [(stereo_calib.GetLeftCameraCalibrationInstance(), 'Left: '), (stereo_calib.GetRightCameraCalibrationInstance(), 'Right: ')]

		touple_frames = []
		for i in range(len(calibs)):
			calib, calib_name = calibs[i]

			test_img_fname 			= calib.GetDistorionCalibImages()[0]
			test_img 				= GetImage(test_img_fname)
			test_img 				= PyrDown(test_img, settings_inst.GetSettings('CV', 'default_downsampling_divisor'), settings_inst.GetSettings('CV', 'desired_frame_shape'))
			
			headline 				= calib_name + 'before shape {0}'.format(test_img.shape)
			touple_frames.append((headline, test_img))
			
			test_und_img 			= stereo_calib.Undistort(test_img)
			headline 				= calib_name + 'stereo und shape {0}'.format(test_und_img.shape)
			touple_frames.append((headline, test_und_img))
		
		if not(self.CheckAllTests()):
			MatplotShow(touple_frames, test_img_fname+'_Stereo_calibration_test', save_fig=self.save_figs, save_fig_only=self.save_figs_only)
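Example #14 drives the project's StereoCalibration wrapper. The OpenCV call it presumably builds on is cv2.stereoCalibrate; a hedged sketch, assuming per-camera chessboard points and intrinsics have already been collected (e.g. as in the sketch after Example #6, run once per camera):

import cv2
import numpy as np

def stereo_calibrate(objpoints, imgpoints_left, imgpoints_right,
                     mtx_l, dist_l, mtx_r, dist_r, image_size):
    # Keep the per-camera intrinsics fixed and solve only for the extrinsics.
    flags = cv2.CALIB_FIX_INTRINSIC
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 1e-5)
    ret, mtx_l, dist_l, mtx_r, dist_r, R, T, E, F = cv2.stereoCalibrate(
        objpoints, imgpoints_left, imgpoints_right,
        mtx_l, dist_l, mtx_r, dist_r, image_size,
        criteria=criteria, flags=flags)
    # R, T map the left camera frame into the right one; |T| is the baseline
    # in the same units as the chessboard square size used for objpoints.
    return R, T, float(np.linalg.norm(T))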
Example #15
    def TestPointMatchStereoVision(self,
                                   folder,
                                   left_fn_frame,
                                   left_fn_slframe,
                                   right_fn_frame,
                                   right_fn_slframe,
                                   baseline,
                                   actual_distance,
                                   filtrate_3Dpoints=True,
                                   use_triangulation=True,
                                   use_opencv_triangulation=True,
                                   use_block_matching=True,
                                   use_brute_force_matching=False):
        '''
		 @brief Test function for stereo detection unit using point matching.

		 @param folder Input folder 
		 @param left_fn_frame Frame filename without points.
		 @param left_fn_slframe Frame filename with points.
		 @param right_fn_frame Frame filename without points.
		 @param right_fn_slframe Frame filename with points.
		 @param baseline (mm)
		 @param actual_distance (mm)
		 @param filtrate_3Dpoints (Filtrate 3D points which are outside of the standard deviation)
		 @param use_triangulation (True for using the triangulation method, False for using the simple disparity-to-depth method)
		 @param use_opencv_triangulation (True for using the opencv implementation of triangulation)
		 @param use_block_matching (use the block matching feature matching method)
		 @param use_brute_force_matching (use the brute force matching method (if use_block_matching=False))
		'''
        import timeit
        import numpy as np
        from src.DroneVision.DroneVision_src.imgProcessing.frameTools.frameTools import GetShape
        from src.DroneVision.DroneVision_src.hardware.imageTools import GetImage, MatplotShow

        if use_block_matching:
            title = 'BLOCK SEARCH'
        else:
            if use_brute_force_matching:
                title = 'BRUTE F.'
            else:
                title = 'FLANN'
        if use_triangulation:
            extra_info = 'TRIANGULATION TO DEPTH'
        else:
            extra_info = 'DISPARITY TO DEPTH'
        print '\n'
        print '#----------- TESTING FEATURE BASED STEREO DETECTION - {0} MATCHING - {1}\t---------------#'.format(
            title, extra_info)
        print '#----------- Left Image without points: {0} \t\t\t\t\t---------------#'.format(
            left_fn_frame)
        print '#----------- Right Image without points: {0} \t\t\t\t---------------#'.format(
            right_fn_frame)

        settings_inst = self.Settings.Settings()
        settings_inst.ChangeSetting('BASIC', 'reset_calibration', False)
        #settings_inst.ChangeSetting('CALIB', 'baseline', baseline)
        settings_inst.ChangeSetting('F_STEREO', 'use_triangulation',
                                    use_triangulation)
        settings_inst.ChangeSetting('F_STEREO', 'use_cv2_triangulation',
                                    use_opencv_triangulation)
        settings_inst.ChangeSetting('F_STEREO', 'use_block_matching',
                                    use_block_matching)
        settings_inst.ChangeSetting('F_STEREO', 'use_brute_force',
                                    use_brute_force_matching)
        left_pointDet = self.PointDetection.PointDetection(
            True, settings_inst.GetSettings())
        left_pointDet.CalibratePointDetection(printInfo=True)
        right_pointDet = self.PointDetection.PointDetection(
            False, settings_inst.GetSettings())
        right_pointDet.CalibratePointDetection(printInfo=False)

        #if not(left_pointDet.GetBaseline() == baseline) or not(right_pointDet.GetBaseline() == baseline):
        #	raise Exception('Fail baseline!')

        left_fn_frame = folder + left_fn_frame
        left_fn_slframe = folder + left_fn_slframe
        right_fn_frame = folder + right_fn_frame
        right_fn_slframe = folder + right_fn_slframe

        delay = timeit.default_timer()
        left_frame = GetImage(left_fn_frame)
        left_sl_frame = GetImage(left_fn_slframe)
        right_frame = GetImage(right_fn_frame)
        right_sl_frame = GetImage(right_fn_slframe)
        print 'Delay reading images: {0} sec'.format(timeit.default_timer() -
                                                     delay)

        #MatplotShow([('left', left_sl_frame), ('right', right_sl_frame)], 'Feature Stereopsis', save_fig=self.save_figs, save_fig_only=self.save_figs_only)

        left_delta_frame, left_points_kp, left_blob_desc, left_frame, left_sl_frame = left_pointDet.GetPointList(
            left_frame,
            left_sl_frame,
            compute_descriptors=not (use_block_matching),
            draw=True)
        right_delta_frame, right_points_kp, right_blob_desc, right_frame, right_sl_frame = right_pointDet.GetPointList(
            right_frame,
            right_sl_frame,
            compute_descriptors=not (use_block_matching),
            draw=True)

        total_delay = timeit.default_timer()

        points3D, match_points_frame = left_pointDet.Get3DPoints(
            GetShape(left_delta_frame),
            GetShape(right_delta_frame),
            left_points_kp,
            left_blob_desc,
            right_points_kp,
            right_blob_desc,
            filtrate_3Dpoints=filtrate_3Dpoints,
            draw=True,
            left_delta_frame=left_delta_frame,
            right_delta_frame=right_delta_frame)
        points3D_m = left_pointDet.Points3DToMatrix(points3D)
        average_point3D = np.mean(points3D_m, axis=1)
        std_point3D = np.std(points3D_m, axis=1)

        timeout = timeit.default_timer() - total_delay
        print 'Total delay for feature based stereopsis: {0} sec'.format(
            timeout)

        delay = timeit.default_timer()
        points3D = left_pointDet.Get3DPoints(
            GetShape(left_delta_frame),
            GetShape(right_delta_frame),
            left_points_kp,
            left_blob_desc,
            right_points_kp,
            right_blob_desc,
            filtrate_3Dpoints=filtrate_3Dpoints,
            draw=False,
            left_delta_frame=left_delta_frame,
            right_delta_frame=right_delta_frame)
        timeout_stereopsis = timeit.default_timer() - delay

        for point3D in points3D:
            print 'Point3D: x = {0} \t y = {1} \t z = {2}'.format(
                point3D[0, 0], point3D[1, 0], point3D[2, 0])
        print 'Average Point3D: x = {0} \t y = {1} \t z = {2}'.format(
            average_point3D[0, 0], average_point3D[1, 0], average_point3D[2,
                                                                          0])
        print 'STD Point3D: x = {0} \t y = {1} \t z = {2}'.format(
            std_point3D[0, 0], std_point3D[1, 0], std_point3D[2, 0])
        print 'Delay for computing distance points: {0} sec, average distance: {1} mm, actual distance: {2} mm, distance error: {3} mm, baseline = {4} mm'.format(
            timeout_stereopsis, average_point3D[2, 0], actual_distance,
            average_point3D[2, 0] - actual_distance, baseline)

        #MatplotShow([(title, match_points_frame)], save_fig=self.save_figs, save_fig_only=self.save_figs_only)

        #title += ':Z(mean)={0:.2f}mm'.format(average_point3D[2,0])

        if not (self.CheckAllTests()):
            if self.show_delta_frames:
                MatplotShow([('left', left_delta_frame),
                             ('right', right_delta_frame)],
                            left_fn_frame +
                            '_Feature_stereo_test_delta_frames',
                            save_fig=self.save_figs,
                            save_fig_only=self.save_figs_only)

        return (title, match_points_frame)
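The docstring above distinguishes "triangulation to depth" from the simpler "disparity to depth" path. For a rectified stereo pair the latter reduces to Z = f * B / d; a minimal illustration with made-up numbers:

def disparity_to_depth(focal_length_px, baseline_mm, disparity_px):
    # Rectified stereo: depth Z = focal length (pixels) * baseline / disparity (pixels).
    if disparity_px <= 0:
        return float('inf')      # zero disparity corresponds to a point at infinity
    return focal_length_px * baseline_mm / float(disparity_px)

print(disparity_to_depth(700.0, 50.0, 8.0))   # illustrative values, result in mm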
Example #16
    def TestCornerDetection(self, folder, fn_frame, fn_slframe):
        '''
		 @brief Test function for corner detection unit.

		 @param folder Input folder 
		 @param fn_frame Frame filename without points.
		 @param fn_slframe Frame filename with points.
		'''
        import timeit
        import numpy as np
        from src.DroneVision.DroneVision_src.hardware.imageTools import GetImage, MatplotShow
        from src.DroneVision.DroneVision_src.imgProcessing.frameTools.frameTools import GetShape
        from src.DroneVision.DroneVision_src.imgProcessing.featureDetection.generalDetectors.detectEdges import Canny
        from src.DroneVision.DroneVision_src.imgProcessing.featureDetection.generalDetectors.detectCorners import DetectCorners, DrawCorners, DetectCornersCV2

        print '\n'
        print '#----------- TESTING CORNER DETECTION   \t---------------#'
        print '#----------- Image without points: {0} \t---------------#'.format(
            fn_frame)
        print '#----------- Image with points: {0}    \t---------------#'.format(
            fn_slframe)

        settings_inst = self.Settings.Settings()

        fn_frame = folder + fn_frame
        fn_slframe = folder + fn_slframe

        delay = timeit.default_timer()
        frame = GetImage(fn_frame)
        sl_frame = GetImage(fn_slframe)
        print 'Delay reading images: {0} sec'.format(timeit.default_timer() -
                                                     delay)

        pointDet = self.PointDetection.PointDetection(
            True, settings_inst.GetSettings())
        pointDet.CalibratePointDetection()
        print 'Min distance between blobs: {0}'.format(
            pointDet.GetMinDistanceBetweenBlobs())

        total_delay = timeit.default_timer()

        delay = timeit.default_timer()
        delta_frame, point_kp, blob_desc, frame_un, sl_frame_un = pointDet.GetPointList(
            frame, sl_frame, draw=False)
        print 'Delay for blob point detection: {0} sec, detected blobs: {1}'.format(
            timeit.default_timer() - delay, len(point_kp))

        delay = timeit.default_timer()
        corners = DetectCorners(
            delta_frame,
            point_kp,
            kernel_size=pointDet.GetMinDistanceBetweenBlobs() * 6,
            kernel_step_size=pointDet.GetMinDistanceBetweenBlobs(),
            k=0.06,
            thresh=1000)
        timeout = timeit.default_timer() - delay
        print 'Delay for finding corners: {0} sec, (shape = {1}), number of corners: {2}'.format(
            timeout, delta_frame.shape, len(corners))

        delay = timeit.default_timer()
        corners_cv2 = DetectCornersCV2(
            delta_frame,
            point_kp,
            kernel_size=int(pointDet.GetMinDistanceBetweenBlobs() * 6),
            k_sobel_size=3,
            k=0.04,
            thresh=0.8)
        timeout = timeit.default_timer() - delay
        print 'Delay for finding corners (cv2): {0} sec, (shape = {1}), number of corners: {2}'.format(
            timeout, delta_frame.shape, len(corners_cv2))

        delay = timeit.default_timer()
        corners_cv2_canny = DetectCornersCV2(Canny(frame_un), [],
                                             kernel_size=2,
                                             k_sobel_size=3,
                                             k=0.04,
                                             thresh=0.8)
        timeout = timeit.default_timer() - delay
        print 'Delay for finding corners (cv2, Canny): {0} sec, (shape = {1}), number of corners: {2}'.format(
            timeout, delta_frame.shape, len(corners_cv2_canny))

        corners_frame = np.array(delta_frame)
        corners_frame = DrawCorners(corners_frame, corners)

        corners_frame_cv2 = np.array(delta_frame)
        corners_frame_cv2 = DrawCorners(corners_frame_cv2, corners_cv2)

        corners_frame_cv2_canny = np.array(Canny(frame_un))
        corners_frame_cv2_canny = DrawCorners(corners_frame_cv2_canny,
                                              corners_cv2_canny)

        touple_frames = []
        touple_frames.append(('Corners', corners_frame))
        touple_frames.append(('Corners (cv2)', corners_frame_cv2))
        touple_frames.append(('Corners Canny (cv2)', corners_frame_cv2_canny))
        touple_frames.append(('Delta Frame', delta_frame))
        if not (self.CheckAllTests()):
            MatplotShow(touple_frames,
                        fn_frame + '_corner_test',
                        save_fig=self.save_figs,
                        save_fig_only=self.save_figs_only)
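DetectCorners and DetectCornersCV2 are project helpers and are not shown here. For reference, plain OpenCV Harris corner detection with a relative threshold on the response map (the 0.8 mirrors the thresh used above; the noise frame is only a stand-in) looks like:

import cv2
import numpy as np

# cv2.cornerHarris expects a single-channel float32 image.
gray = np.random.randint(0, 256, (480, 640), dtype=np.uint8).astype(np.float32)
response = cv2.cornerHarris(gray, 2, 3, 0.04)              # blockSize=2, ksize=3, k=0.04
corners = np.argwhere(response > 0.8 * response.max())     # (row, col) coordinates
print('corners found: {0}'.format(len(corners)))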
Example #17
    def TestEdgeHeading(self, folder, fn_frame, fn_slframe):
        '''
		 @brief Test function for EdgeHeading unit.

		 @param folder Input folder 
		 @param fn_frame Frame filename without points.
		 @param fn_slframe Frame filename with points.
		'''
        import timeit
        import numpy as np
        from src.DroneVision.DroneVision_src.hardware.imageTools import GetImage, MatplotShow

        print '\n'
        print '#----------- TESTING EDGE HEADING PROCESSING   \t---------------#'
        print '#----------- Image without points: {0} \t---------------#'.format(
            fn_frame)
        print '#----------- Image with points: {0}    \t---------------#'.format(
            fn_slframe)

        settings_inst = self.Settings.Settings()

        fn_frame = folder + fn_frame
        fn_slframe = folder + fn_slframe

        delay = timeit.default_timer()
        frame = GetImage(fn_frame)
        sl_frame = GetImage(fn_slframe)
        print 'Delay reading images: {0} sec'.format(timeit.default_timer() -
                                                     delay)

        edgeHeading = self.EdgeHeading.EdgeHeading()

        pointDet = self.PointDetection.PointDetection(
            True, settings_inst.GetSettings())
        pointDet.CalibratePointDetection()
        print 'Min distance between blobs: {0}'.format(
            pointDet.GetMinDistanceBetweenBlobs())

        total_delay = timeit.default_timer()

        delay = timeit.default_timer()
        delta_frame, point_kp, blob_desc, frame_un, sl_frame_un = pointDet.GetPointList(
            frame, sl_frame, draw=True)
        print 'Delay for blob point detection: {0} sec, detected blobs: {1}'.format(
            timeit.default_timer() - delay, len(point_kp))

        delay = timeit.default_timer()
        hough_frame, edgel_map_filtered, boundary_hough_lines = pointDet.GetBoundaryHoughLines(
            frame_un,
            delta_frame,
            point_kp,
            draw=True,
            print_hough_positions=True)
        print 'Delay for finding boundary edges (filtered) + lines: {0} sec'.format(
            timeit.default_timer() - delay)

        delay = timeit.default_timer()
        selected_hor_edge_heading, selected_vert_edge_heading, possible_hor_edge_headings, possible_vert_edge_headings = edgeHeading.ComputeEdgeHeading(
            edgel_map_filtered, boundary_hough_lines, draw=False)
        print 'Delay for finding edge heading angle: {0} sec, hor_edge_heading = {1}, vert_edge_heading = {2}'.format(
            timeit.default_timer() - delay, selected_hor_edge_heading,
            selected_vert_edge_heading)

        timeout = timeit.default_timer() - total_delay
        print 'Total delay for downscaling + undistort + blob + hough lines + bounded lines + edge heading: {0} sec'.format(
            timeout)

        edgel_map_filtered_all_headings = np.array(
            edgel_map_filtered, dtype=edgel_map_filtered.dtype)
        selected_hor_edge_heading, selected_vert_edge_heading, possible_hor_edge_headings, possible_vert_edge_headings, edgel_map_filtered_all_headings = edgeHeading.ComputeEdgeHeading(
            edgel_map_filtered_all_headings, boundary_hough_lines, draw=True)

        touple_frames = []
        #touple_frames.append(('SL frame', sl_frame))
        #touple_frames.append(('SL undistorted', sl_frame_un))
        #touple_frames.append(('Original points', delta_frame))
        #touple_frames.append(('Hough lines', hough_frame))
        #touple_frames.append(('Selected edge heading', edgel_map_filtered))
        touple_frames.append(
            ('Possible edge headings', edgel_map_filtered_all_headings))
        print 'max_hor = BLUE, min_hor = RED, max_vert = PURPLE, min_vert = GREEN'
        if not (self.CheckAllTests()):
            MatplotShow(touple_frames,
                        fn_frame + '_Edge_heading_test',
                        savefig_folder=self.savefig_folder +
                        'edge_heading_test/',
                        save_fig=self.save_figs,
                        save_fig_only=self.save_figs_only,
                        inlude_main_title_in_plot=False)
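ComputeEdgeHeading selects horizontal and vertical edge headings from the bounded Hough lines; its internals are not shown. As a generic illustration of the underlying geometry only, the heading of a line segment follows from atan2 on its endpoints:

import math

def segment_heading(x1, y1, x2, y2):
    # Heading (angle) of the segment from (x1, y1) to (x2, y2), in degrees.
    return math.degrees(math.atan2(y2 - y1, x2 - x1))

print(segment_heading(0, 0, 100, 100))   # 45.0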
Example #18
    def TestLineDetection(self, folder, fn_frame, fn_slframe):
        '''
		 @brief Test function for line detection unit.

		 @param folder Input folder 
		 @param fn_frame Frame filename without points.
		 @param fn_slframe Frame filename with points.
		'''
        import timeit
        import numpy as np
        from src.DroneVision.DroneVision_src.hardware.imageTools import GetImage, MatplotShow
        from src.DroneVision.DroneVision_src.imgProcessing.featureDetection.generalDetectors.detectLines import HoughLinesPointMatrix, FindLineLimits, DrawHoughLines
        from src.DroneVision.DroneVision_src.imgProcessing.featureDetection.generalDetectors.detectEdges import Canny

        print '\n'
        print '#----------- TESTING LINE DETECTION   \t---------------#'
        print '#----------- Image without points: {0} \t---------------#'.format(
            fn_frame)
        print '#----------- Image with points: {0}    \t---------------#'.format(
            fn_slframe)

        settings_inst = self.Settings.Settings()

        fn_frame = folder + fn_frame
        fn_slframe = folder + fn_slframe

        delay = timeit.default_timer()
        frame = GetImage(fn_frame)
        sl_frame = GetImage(fn_slframe)
        print 'Delay reading images: {0} sec'.format(timeit.default_timer() -
                                                     delay)

        pointDet = self.PointDetection.PointDetection(
            True, settings_inst.GetSettings())
        pointDet.CalibratePointDetection()
        print 'Min distance between blobs: {0}'.format(
            pointDet.GetMinDistanceBetweenBlobs())

        total_delay = timeit.default_timer()

        delay = timeit.default_timer()
        delta_frame, point_kp, blob_desc, frame_un, sl_frame_un = pointDet.GetPointList(
            frame, sl_frame, draw=True)
        print 'Delay for blob point detection: {0} sec, detected blobs: {1}'.format(
            timeit.default_timer() - delay, len(point_kp))

        delay = timeit.default_timer()
        hough_frame, edgel_map_filtered, boundary_hough_lines = pointDet.GetBoundaryHoughLines(
            frame_un,
            delta_frame,
            point_kp,
            filtrate_edge_points=True,
            draw=False)
        print 'Delay for finding boundary edges (filtered) + lines: {0} sec'.format(
            timeit.default_timer() - delay)

        timeout = timeit.default_timer() - total_delay
        print 'Total delay for downscaling + undistort + blob + hough lines + bounded lines: {0} sec'.format(
            timeout)

        delay = timeit.default_timer()
        hough_frame, edgel_map_unfiltered, boundary_hough_lines = pointDet.GetBoundaryHoughLines(
            frame_un,
            delta_frame,
            point_kp,
            filtrate_edge_points=False,
            draw=False)
        print 'Delay for finding boundary edges + lines: {0} sec'.format(
            timeit.default_timer() - delay)

        # Compute with drawn figures for representation.
        hough_frame, edgel_map_filtered, boundary_hough_lines = pointDet.GetBoundaryHoughLines(
            frame_un,
            delta_frame,
            point_kp,
            filtrate_edge_points=True,
            draw=True)
        hough_frame, edgel_map_unfiltered, boundary_hough_lines = pointDet.GetBoundaryHoughLines(
            frame_un,
            delta_frame,
            point_kp,
            filtrate_edge_points=False,
            draw=True)

        delay = timeit.default_timer()
        hough_lines = HoughLinesPointMatrix(
            delta_frame,
            point_kp,
            radi_threshold=pointDet.GetMinDistanceBetweenBlobs(),
            radi_threshold_tuning_param=0.3)
        timeout = timeit.default_timer() - delay
        print 'Delay for finding hough lines matrix: {0} sec, (shape = {1})'.format(
            timeout, delta_frame.shape)
        vert_hor_hough_lines_draw = np.array(delta_frame)
        vert_hor_hough_lines_draw = DrawHoughLines(vert_hor_hough_lines_draw,
                                                   hough_lines)
        edgel_frame = Canny(frame_un)
        touple_frame_edgel_frame = ('Edgel Map', edgel_frame)
        touple_frame_bounded_hough_lines = ('Bounded Lines', hough_frame)

        touple_frames = []
        touple_frames.append(('Frame', frame_un))
        touple_frames.append(('SL Frame', sl_frame_un))
        touple_frames.append(('Feature Points', delta_frame))
        touple_frames.append(('Hough Line Matrix', vert_hor_hough_lines_draw))
        #touple_frames.append(('Hough lines', hough_frame))
        #touple_frames.append(('Boundary lines (filtered)', edgel_map_filtered))
        #touple_frames.append(('Boundary lines (unfiltered)', edgel_map_unfiltered))
        if not (self.CheckAllTests()):
            MatplotShow(touple_frames,
                        fn_frame + '_line_test',
                        save_fig=self.save_figs,
                        save_fig_only=self.save_figs_only)
            MatplotShow([touple_frame_edgel_frame,
                         touple_frame_bounded_hough_lines],
                        fn_frame + '_line_test',
                        save_fig=self.save_figs,
                        save_fig_only=self.save_figs_only)
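HoughLinesPointMatrix and GetBoundaryHoughLines are project helpers; the plain OpenCV building blocks they presumably rest on are cv2.Canny and cv2.HoughLines. A self-contained sketch with illustrative thresholds and a synthetic frame:

import cv2
import numpy as np

gray = np.random.randint(0, 256, (480, 640), dtype=np.uint8)
edges = cv2.Canny(gray, 50, 150)                        # binary edgel map
lines = cv2.HoughLines(edges, 1, np.pi / 180.0, 200)    # (rho, theta) pairs or None
drawn = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
for line in (lines if lines is not None else []):
    rho, theta = line[0]
    a, b = np.cos(theta), np.sin(theta)
    x0, y0 = a * rho, b * rho
    pt1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * a))   # extend the line for drawing
    pt2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * a))
    cv2.line(drawn, pt1, pt2, (0, 0, 255), 2)
print('lines found: {0}'.format(0 if lines is None else len(lines)))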
Example #19
    def TestBlobScaleDetector(self, folder, fn_frame, fn_slframe,
                              actual_distance):
        '''
		 @brief Test function for blob scale detector unit.

		 @param folder Input folder
		 @param fn_frame Frame filename without points.
		 @param fn_slframe Frame filename with points.
		 @param actual_distance (mm)
		'''
        import timeit, math
        import numpy as np
        from src.DroneVision.DroneVision_src.hardware.imageTools import GetImage, MatplotShow
        from src.DroneVision.DroneVision_src.imgProcessing.frameTools.frameTools import GetShape

        print '\n'
        print '#----------- TESTING SCALING BASED BLOB DETECTION   \t---------------#'
        print '#----------- Image without points: {0} \t---------------#'.format(
            fn_frame)
        print '#----------- Image with points: {0}    \t---------------#'.format(
            fn_slframe)

        settings_inst = self.Settings.Settings()
        settings_inst.ChangeSetting('BASIC', 'reset_calibration', False)
        pointDet = self.PointDetection.PointDetection(
            True, settings_inst.GetSettings())

        pointDet.CalibrateBlobScaleDetector(printInfo=True,
                                            force_calibration=True)
        print 'Min distance between blobs: {0}'.format(
            pointDet.GetMinDistanceBetweenBlobs())

        fn_frame = folder + fn_frame
        fn_slframe = folder + fn_slframe

        delay = timeit.default_timer()
        frame = GetImage(fn_frame)
        sl_frame = GetImage(fn_slframe)
        print 'Delay reading images: {0} sec'.format(timeit.default_timer() -
                                                     delay)

        total_delay = timeit.default_timer()

        delta_frame, points_kp, blob_desc, frame, sl_frame = pointDet.GetPointList(
            frame, sl_frame, concatenate_points=True, draw=True)

        delay = timeit.default_timer()
        width, height = GetShape(sl_frame)
        blockSize = int(
            math.sqrt(math.pow(width, 2.0) + math.pow(height, 2.0)) // 4)
        scaling_point_frame, mean_point_size = pointDet.PointScaleMatch(
            delta_frame, points_kp, blockSize)
        print 'Delay for computing disparity points: {0} sec'.format(
            timeit.default_timer() - delay)

        timeout = timeit.default_timer() - total_delay
        print 'Total delay for downscaling + undistort + blob + feature based stereopsis: {0} sec'.format(
            timeout)

        touple_frames = []
        touple_frames.append(('SL frame', sl_frame))
        touple_frames.append(('Delta frame', delta_frame))
        if not (self.CheckAllTests()):
            MatplotShow(touple_frames,
                        fn_frame + '_Blob_scale_test',
                        save_fig=self.save_figs,
                        save_fig_only=self.save_figs_only)