Example #1
0
    def __init__(self):
        """Load camera and simulator settings from the Smart_Camera config file."""

        # load config file
        VN_config.get_file("Smart_Camera")

        # camera device index and capture resolution
        self.camera_index = VN_config.get_integer("camera", "camera_index", 0)
        self.camera_width = VN_config.get_integer("camera", "camera_width", 640)
        self.camera_height = VN_config.get_integer("camera", "camera_height", 480)
        # field of view defaults; presumably degrees — TODO confirm against VN_config docs
        self.camera_hfov = VN_config.get_float("camera", "horizontal-fov", 72.42)
        self.camera_vfov = VN_config.get_float("camera", "vertical-fov", 43.3)

        # simulator settings. Default target path is built with expanduser("~")
        # instead of os.environ["HOME"], which raises KeyError when HOME is
        # unset and does not work on Windows.
        self.simulator = VN_config.get_boolean("simulator", "use_simulator", True)
        self.target_file = VN_config.get_string(
            "simulator",
            "target_location",
            os.path.join(os.path.expanduser("~"), "visnav", "target.jpg"),
        )
        self.target_size = VN_config.get_float("algorithm", "outer_ring", 1.0)

        # Run the program no matter what mode or location; Useful for debug purposes
        self.always_run = VN_config.get_boolean("general", "always_run", True)

        # how many frames have been captured
        self.frames_captured = 0

        # debugging flag
        self.kill_camera = False
Example #2
0
    def __init__(self):
        """Load camera and simulator settings from the Smart_Camera config file."""

        #load config file
        VN_config.get_file('Smart_Camera')

        #get camera specs: device index and capture resolution
        self.camera_index = VN_config.get_integer('camera', 'camera_index', 0)
        self.camera_width = VN_config.get_integer('camera', 'camera_width',
                                                  640)
        self.camera_height = VN_config.get_integer('camera', 'camera_height',
                                                   480)
        #field of view defaults; presumably degrees — TODO confirm against VN_config docs
        self.camera_hfov = VN_config.get_float('camera', 'horizontal-fov',
                                               72.42)
        self.camera_vfov = VN_config.get_float('camera', 'vertical-fov', 43.3)

        #simulator settings. Default target path uses expanduser("~") rather
        #than os.environ['HOME'], which raises KeyError when HOME is unset
        #and is not portable to Windows.
        self.simulator = VN_config.get_boolean('simulator', 'use_simulator',
                                               True)
        self.target_file = VN_config.get_string(
            'simulator', 'target_location',
            os.path.join(os.path.expanduser('~'), 'visnav', 'target.jpg'))
        self.target_size = VN_config.get_float('algorithm', 'outer_ring', 1.0)

        #Run the program no matter what mode or location; Useful for debug purposes
        self.always_run = VN_config.get_boolean('general', 'always_run', True)

        #how many frames have been captured
        self.frames_captured = 0

        #debugging flag
        self.kill_camera = False
Example #3
0
    def __init__(self):
        """Read camera parameters from config and prepare capture/undistortion state."""

        # which camera device to open
        self.camera_index = VN_config.get_integer('camera', 'index', 0)

        # capture resolution
        self.img_width = VN_config.get_integer('camera', 'width', 640)
        self.img_height = VN_config.get_integer('camera', 'height', 480)

        # image center in pixel coordinates. Floor division keeps the center
        # an int on Python 3 as well; the original '/' was Python-2 style and
        # would silently produce a float center under Python 3.
        self.img_center_x = self.img_width // 2
        self.img_center_y = self.img_height // 2

        # field of view; presumably degrees — TODO confirm against VN_config docs
        self.cam_hfov = VN_config.get_float('camera', 'horizontal-fov', 70.42)
        self.cam_vfov = VN_config.get_float('camera', 'vertical-fov', 43.3)

        # camera intrinsic matrix and distortion coefficients.
        # Defaults: Logitech C920 calibration values.
        mtx = np.array([[614.01269552, 0, 315.00073982],
                        [0, 614.43556296, 237.14926858],
                        [0, 0, 1.0]])
        dist = np.array([0.12269303, -0.26618881, 0.00129035,
                         0.00081791, 0.17005303])

        self.matrix = VN_config.get_array('camera', 'matrix', mtx)
        self.distortion = VN_config.get_array('camera', 'distortion', dist)

        # precompute the optimal undistorted camera matrix and its valid ROI
        self.newcameramtx, self.roi = cv2.getOptimalNewCameraMatrix(
            self.matrix, self.distortion,
            (self.img_width, self.img_height), 1,
            (self.img_width, self.img_height))

        # camera object; not created here
        self.camera = None

        # number of cores to use: the user's request capped by the hardware
        desiredCores = VN_config.get_integer('processing', 'desired_cores', 4)
        self.cores_available = min(desiredCores, multiprocessing.cpu_count())

        # does the user want to capture images in the background
        self.background_capture = VN_config.get_boolean(
            'processing', 'background_capture', True)

        # background image processing state
        self.proc = None               # background process object
        self.parent_conn = None        # parent end of communication pipe
        self.img_counter = 0           # number of images requested so far
        self.is_backgroundCap = False  # state variable for background capture
Example #4
0
	def __init__(self):
		"""Load camera and simulator settings from the Smart_Camera config file."""

		#load config file
		VN_config.get_file('Smart_Camera')

		#get camera specs: device index, capture resolution and field of view
		#(FOV defaults presumably in degrees — TODO confirm against VN_config docs)
		self.camera_index = VN_config.get_integer('camera','camera_index',0)
		self.camera_width = VN_config.get_integer('camera', 'camera_width', 640)
		self.camera_height = VN_config.get_integer('camera', 'camera_height', 480)
		self.camera_hfov = VN_config.get_float('camera', 'horizontal-fov', 72.42)
		self.camera_vfov = VN_config.get_float('camera', 'vertical-fov', 43.3)

		#use simulator
		self.simulator = VN_config.get_boolean('simulator','use_simulator',True)

		#Run the program no matter what mode or location; Useful for debug purposes
		self.always_run = VN_config.get_boolean('general', 'always_run', True)

		#how many frames have been captured
		self.frame_count = 0

		#debugging flag
		self.kill_camera = False
Example #5
0
    def __init__(self):
        """Read camera parameters from config and prepare capture/undistortion state."""

        # get which camera we will use
        self.camera_index = VN_config.get_integer('camera', 'index', 0)

        # get image resolution
        self.img_width = VN_config.get_integer('camera', 'width', 640)
        self.img_height = VN_config.get_integer('camera', 'height', 480)

        # image center in pixel coordinates. Floor division keeps the center
        # an int on Python 3 as well; the original '/' was Python-2 style and
        # would silently produce a float center under Python 3.
        self.img_center_x = self.img_width // 2
        self.img_center_y = self.img_height // 2

        # field of view; presumably degrees — TODO confirm against VN_config docs
        self.cam_hfov = VN_config.get_float('camera', 'horizontal-fov', 70.42)
        self.cam_vfov = VN_config.get_float('camera', 'vertical-fov', 43.3)

        #get camera distortion matrix and intrinsics. Defaults: logitech c920
        mtx = np.array([[614.01269552, 0, 315.00073982],
                        [0, 614.43556296, 237.14926858], [0, 0, 1.0]])
        dist = np.array(
            [0.12269303, -0.26618881, 0.00129035, 0.00081791, 0.17005303])

        self.matrix = VN_config.get_array('camera', 'matrix', mtx)
        self.distortion = VN_config.get_array('camera', 'distortion', dist)

        # precompute the optimal undistorted camera matrix and its valid ROI
        self.newcameramtx, self.roi = cv2.getOptimalNewCameraMatrix(
            self.matrix, self.distortion, (self.img_width, self.img_height), 1,
            (self.img_width, self.img_height))

        #camera object; not created here
        self.camera = None

        #number of cores available for use: user request capped by hardware
        desiredCores = VN_config.get_integer('processing', 'desired_cores', 4)
        self.cores_available = min(desiredCores, multiprocessing.cpu_count())

        #does the user want to capture images in the background
        self.background_capture = VN_config.get_boolean(
            'processing', 'background_capture', True)

        # background image processing variables
        self.proc = None  # background process object
        self.parent_conn = None  # parent end of communication pipe
        self.img_counter = 0  # num images requested so far
        self.is_backgroundCap = False  # state variable for background capture
Example #6
0
    def __init__(self):
        """Set up core allocation, timing state and the dispatcher/worker pipe."""

        # The number of cores to be used while processing image data.
        # This number may be less than the actual number of cores on the CPU
        # depending on the user's specifications:
        #   cores = min(desiredCores, multiprocessing.cpu_count())
        # i.e. don't allow more cores than the CPU has available, and don't
        # run more than the user wants.
        desired_cores = VN_config.get_integer('processing', 'desired_cores', 4)
        available_cores = min(desired_cores, multiprocessing.cpu_count())
        # check if a core is already in use for background image capture
        # (True -> 1, so one core is subtracted; always keep at least 1)
        cores_image_capture = int(
            VN_config.get_boolean('processing', 'background_capture', True))
        self.cores_processing = max(available_cores - cores_image_capture, 1)

        # The time (in millis) it takes to capture an image.
        # Frame rate = 1000/captureTime
        # ****On some cameras frame rate is dependent on camera exposure level
        # ****i.e. dark = slower fps and light = fast frame rate
        # ****Capture time is dependent on available CPU
        # This time will be dynamically calculated using a rolling average
        # over the last 4 samples.
        self.captureTime = 0
        self.captureTimeSet = np.zeros(4)

        # The time (in millis) it takes to process an image.
        # ****Process time is dependent on available CPU and the image
        # This time will be dynamically calculated using a rolling average
        # over the last 4 samples.
        self.processTime = 0
        self.processTimeSet = np.zeros(4)

        # How often an image is dispatched to be processed (in millis).
        # Determined by splitting up processTime among the available cores.
        # *****This will not be smaller than captureTime because we won't allow
        # it to process frames more often than capturing frames; it would cause
        # sync issues with Pipe()
        # *****If the CPU is slow and has limited cores, we may process every
        # other frame or every nth frame!!!
        #   runTime = max(processTime/processing_cores, captureTime)
        self.runTime = 0

        # set up a pipe to pass info between background processes and dispatcher
        self.parent_conn, self.child_conn = multiprocessing.Pipe()

        # last time we started to process an image
        self.lastDispatch = 0

        # last time an image process completed
        # (note: attribute name keeps the original "Retreival" spelling —
        # renaming it would break external callers)
        self.lastRetreival = 0
Example #7
0
	def __init__(self):
		"""Set up core allocation, timing state and the dispatcher/worker pipe."""

		#The number of cores to be used while processing image data.
		#This number may be less than the actual number of cores on the CPU
		#depending on the user's specifications:
		#  cores = min(desiredCores, multiprocessing.cpu_count())
		#i.e. don't allow more cores than the CPU has available, and don't run
		#more than the user wants.
		desired_cores = VN_config.get_integer('processing', 'desired_cores', 4)
		available_cores = min(desired_cores, multiprocessing.cpu_count())
		#check if a core is already in use for background image capture
		#(True -> 1, so one core is subtracted; always keep at least 1)
		cores_image_capture = int(VN_config.get_boolean('processing','background_capture', True))
		self.cores_processing = max(available_cores - cores_image_capture,1)


		#The time (in millis) it takes to capture an image.
		#Frame rate = 1000/captureTime
		#****On some cameras frame rate is dependent on camera exposure level
		#****i.e. dark = slower fps and light = fast frame rate
		#****Capture time is dependent on available CPU
		#This time will be dynamically calculated using a rolling average
		#over the last 4 samples.
		self.captureTime = 0 
		self.captureTimeSet = np.zeros(4)

		#The time (in millis) it takes to process an image.
		#****Process time is dependent on available CPU and the image
		#This time will be dynamically calculated using a rolling average
		#over the last 4 samples.
		self.processTime = 0
		self.processTimeSet = np.zeros(4)

		#How often an image is dispatched to be processed (in millis).
		#Determined by splitting up processTime among the available cores.
		#*****This will not be smaller than captureTime because we won't allow
		#it to process frames more often than capturing frames; it would cause
		#sync issues with Pipe()
		#*****If the CPU is slow and has limited cores, we may process every
		#other frame or every nth frame!!!
		#  runTime = max(processTime/processing_cores, captureTime)
		self.runTime = 0

		#set up a pipe to pass info between background processes and dispatcher
		self.parent_conn, self.child_conn = multiprocessing.Pipe()

		#last time we started to process an image
		self.lastDispatch = 0

		#last time an image process completed
		#(note: attribute name keeps the original "Retreival" spelling —
		#renaming it would break external callers)
		self.lastRetreival = 0