Example No. 1
	def __init__(self):
		#load algorithm constants
		#how round a circle needs to be. Perfect circle = 1
		self.eccentricity = VN_config.get_float('algorithm', 'eccentricity', 0.6)
		#Minimum ratio while comparing contour area to ellipse area
		self.area_ratio = VN_config.get_float('algorithm','area_ratio', 0.8)
		#Minimum ratio between outer and inner circle area in a ring
		self.ring_ratio = VN_config.get_float('algorithm','ring_ratio', 0.1)
		#The smallest span of min/max pixels that gets enhanced (largest range is 255; smaller numbers make the image susceptible to noise)
		self.min_range = VN_config.get_integer('algorithm', 'min_range', 10)
		#reduce the grayscale resolution (steps) by this multiplier (1 is full res, 2 is half res, 4 is quarter res)
		self.res_reduction = VN_config.get_integer('algorithm', 'res_reduction', 32)
Example No. 2
 def __init__(self):
     #load algorithm constants
     #how round a circle needs to be. Perfect circle = 1
     self.eccentricity = VN_config.get_float('algorithm', 'eccentricity',
                                             0.6)
     #Minimum ratio while comparing contour area to ellipse area
     self.area_ratio = VN_config.get_float('algorithm', 'area_ratio', 0.8)
     #Minimum ratio between outer and inner circle area in a ring
     self.ring_ratio = VN_config.get_float('algorithm', 'ring_ratio', 0.1)
     #The smallest span of min/max pixels that gets enhanced (largest range is 255; smaller numbers make the image susceptible to noise)
     self.min_range = VN_config.get_integer('algorithm', 'min_range', 10)
     #reduce the grayscale resolution (steps) by this multiplier (1 is full res, 2 is half res, 4 is quarter res)
     self.res_reduction = VN_config.get_integer('algorithm',
                                                'res_reduction', 32)
Example No. 3
	def __init__(self):

		
		self.targetLocation = PositionVector()
		self.vehicleLocation = PositionVector()

		self.backgroundColor = (74,88,109)

		#define camera
		self.camera_width = VN_config.get_integer('camera', 'width', 640)
		self.camera_height = VN_config.get_integer('camera', 'height', 640)
		self.camera_vfov = VN_config.get_float('camera', 'vertical-fov',72.42 )
		self.camera_hfov = VN_config.get_float('camera', 'horizontal-fov', 72.42)
		self.camera_fov = math.sqrt(self.camera_vfov**2 + self.camera_hfov**2)
		self.camera_frameRate = 30
Example No. 4
    def __init__(self):

        self.targetLocation = PositionVector()
        self.vehicleLocation = PositionVector()

        self.backgroundColor = (74, 88, 109)

        #define camera
        self.camera_width = VN_config.get_integer('camera', 'width', 640)
        self.camera_height = VN_config.get_integer('camera', 'height', 640)
        self.camera_vfov = VN_config.get_float('camera', 'vertical-fov', 72.42)
        self.camera_hfov = VN_config.get_float('camera', 'horizontal-fov',
                                               72.42)
        self.camera_fov = math.sqrt(self.camera_vfov**2 + self.camera_hfov**2)
        self.camera_frameRate = 30
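The camera_fov line above estimates a diagonal field of view by combining the vertical and horizontal FOV in quadrature. A quick standalone check of that arithmetic, using the default values from the example:

import math

camera_vfov = 72.42  # default vertical FOV from the example
camera_hfov = 72.42  # default horizontal FOV from the example

# Diagonal FOV as computed above: sqrt(vfov^2 + hfov^2)
camera_fov = math.sqrt(camera_vfov**2 + camera_hfov**2)
print(round(camera_fov, 2))  # ~102.42 degrees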
Example No. 5
    def __init__(self):

        # get which camera we will use
        self.camera_index = VN_config.get_integer('camera','index',0)


        # get image resolution
        self.img_width = VN_config.get_integer('camera','width',640)
        self.img_height = VN_config.get_integer('camera','height',480)
        

        # get image center
        self.img_center_x = self.img_width / 2
        self.img_center_y = self.img_height / 2
        
        
        # define field of view
        self.cam_hfov = VN_config.get_float('camera','horizontal-fov',70.42)
        self.cam_vfov = VN_config.get_float('camera','vertical-fov',43.3)
        


        #get camera distortion matrix and intrinsics. Defaults: logitech c920
        mtx = np.array([[ 614.01269552,0,315.00073982],
                [0,614.43556296,237.14926858],
                [0,0,1.0]])
        dist = np.array([0.12269303, -0.26618881,0.00129035, 0.00081791,0.17005303])

        self.matrix  = VN_config.get_array('camera','matrix', mtx)
        self.distortion = VN_config.get_array('camera', 'distortion', dist)

        self.newcameramtx, self.roi=cv2.getOptimalNewCameraMatrix(self.matrix,self.distortion,(self.img_width,self.img_height),1,(self.img_width,self.img_height))


        #create a camera object
        self.camera = None


        #number of cores available for use
        desiredCores = VN_config.get_integer('processing', 'desired_cores', 4)
        self.cores_available = min(desiredCores, multiprocessing.cpu_count())


        #does the user want to capture images in the background
        self.background_capture = VN_config.get_boolean('processing','background_capture', True)


        # background image processing variables
        self.proc = None              # background process object
        self.parent_conn = None       # parent end of communication pipe
        self.img_counter = 0          # num images requested so far
        self.is_backgroundCap = False #state variable for background capture
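The constructor above computes self.newcameramtx and self.roi with cv2.getOptimalNewCameraMatrix but ends before they are used. As a rough illustration only (this helper is not part of the original class), the usual OpenCV pattern is to undistort each frame with the new camera matrix and then crop to the returned region of interest:

import cv2

def undistort_frame(frame, matrix, distortion, newcameramtx, roi):
    # Remap the raw frame using the optimal new camera matrix
    undistorted = cv2.undistort(frame, matrix, distortion, None, newcameramtx)
    # Crop to the valid region of interest returned by getOptimalNewCameraMatrix
    x, y, w, h = roi
    return undistorted[y:y + h, x:x + w]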
Example No. 6
    def __init__(self):
        # add variable initialisation here
        self.api = None
        self.vehicle = None

        self.last_mode_call = 0
        self.last_mode_state = 'STABILIZE'
        self.mode_update_rate = VN_config.get_float('vehicle control', 'mode_update_rate', 0.75)

        self.last_home_call = 0
        self.last_home = None
        self.home_update_rate = VN_config.get_float('vehicle control', 'home_update_rate', 10)

        self.last_set_velocity = 0
        self.vel_update_rate = VN_config.get_float('vehicle control', 'vel_update_rate', 0.1)

        self.last_report_landing_target = 0
        self.landing_update_rate = VN_config.get_float('vehicle control', 'landing_update_rate', 0.02)
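Each *_update_rate above is stored next to a last_* timestamp, which suggests the surrounding methods rate-limit how often they talk to the vehicle. The calling code is not shown, so the following throttle helper is only an assumption about how those pairs are used:

import time

def should_update(last_call, update_rate):
    # Hypothetical gate: allow the call only if more than update_rate seconds have elapsed
    return (time.time() - last_call) > update_rate

# e.g. inside a hypothetical set_velocity():
# if should_update(self.last_set_velocity, self.vel_update_rate):
#     self.last_set_velocity = time.time()
#     ... send the velocity command ...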
Example No. 7
    def __init__(self):
        #create sub directory for logs and videos
        self.location = VN_config.get_string('logging', 'location',
                                             os.environ['HOME'] + '/visnav/')

        path = self.location + 'logs'
        if not os.path.exists(path):
            os.makedirs(path)
        path = self.location + 'vids'
        if not os.path.exists(path):
            os.makedirs(path)

        #set default strategy name
        self.strat_name = 'Smart_Camera'

        # get image resolution
        self.img_width = VN_config.get_integer('camera', 'width', 640)
        self.img_height = VN_config.get_integer('camera', 'height', 480)

        #####TEXT LOGGING######
        #levels = 'debug' , 'general' , 'aircraft' , 'algorithm' , 'performance'
        #multiple message levels can be selected by concatenating strings i.e. 'debug, aircraft'
        #what type of messages we print to the terminal
        self.print_level = VN_config.get_string('logging', 'print_level',
                                                'debug, general')
        #what type of messages we log to a file
        self.log_level = VN_config.get_string('logging', 'log_level',
                                              'aircraft, algorithm, general')

        #####VIDEO RECORDING######
        #levels = 'frame' , 'gui'
        #multiple message levels can be selected at once by concatenating strings i.e. 'frame, gui'
        #what type of images we display on the screen
        self.display_level = VN_config.get_string('logging', 'display_level',
                                                  'raw, gui')
        #what type of images we record
        self.record_level = VN_config.get_string('logging', 'record_level',
                                                 'raw')
        #Write a video or individual images
        self.record_type = VN_config.get_string('logging', 'record_type',
                                                'image')

        #Note about useful logging practices:
        #In order to replay a flight through the program, log_level must include 'aircraft' and 'algorithm'
        #record level must include 'frame'
        #all other logging levels are for user diagnostics

        #a list of video writers and their tag
        self.video_writers = []

        #text logger
        self.logger = None
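The comments above say that several logging levels can be selected by concatenating them into one string such as 'debug, general'. The check itself is not shown; a minimal sketch of how such a level string could be tested (an assumption, with a hypothetical helper name) is:

def level_enabled(level, level_string):
    # Split the configured comma-separated string into individual level names
    enabled = [item.strip() for item in level_string.split(',')]
    return level in enabled

print(level_enabled('debug', 'debug, general'))     # True
print(level_enabled('aircraft', 'debug, general'))  # False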
Example No. 8
    def __init__(self):

        #The number of cores to be used while processing image data
        #This number may be less than the actual number of cores on the CPU depending on the user's specifications
        #cores = min(desiredCores, multiprocessing.cpu_count()) //don't allow more cores than the CPU has available and don't run more than the user wants
        desired_cores = VN_config.get_integer('processing', 'desired_cores', 4)
        available_cores = min(desired_cores, multiprocessing.cpu_count())
        #check if a core is already in use for background image capture
        cores_image_capture = int(
            VN_config.get_boolean('processing', 'background_capture', True))
        self.cores_processing = max(available_cores - cores_image_capture, 1)

        #The time (in millis) it takes to capture an image
        #Frame rate = 1000/captureTime
        #****On some cameras frame rate is dependent on camera exposure level
        #****i.e. dark = slower fps and light = fast frame rate
        #****Capture time is dependent on available CPU
        #This time will be dynamically calculated using a rolling average
        self.captureTime = 0
        self.captureTimeSet = np.zeros(4)

        #The time(in millis) it takes to process an image
        #****Process time is dependent on available CPU and the image
        #This time will be dynamically calculated using a rolling average
        self.processTime = 0
        self.processTimeSet = np.zeros(4)

        #How often an image is dispatched to be processed (in millis)
        #Determined by splitting up processTime among available number of cores
        #*****This will not be smaller than captureTime because we won't allow it to process frames more often than capturing frames; it will cause sync issues with Pipe()
        #*****If the CPU is slow and has limited cores, we may process every other frame or every nth frame!!!
        #runTime = max(processTime/processing_cores,captureTime)
        self.runTime = 0

        #set up a pipe to pass info between background processes and dispatcher
        self.parent_conn, self.child_conn = multiprocessing.Pipe()

        #last time we started to process an image
        self.lastDispatch = 0

        #last time an image process completed
        self.lastRetreival = 0
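The comments above describe captureTime and processTime as rolling averages over the four-slot numpy buffers, and give the dispatch period as runTime = max(processTime/processing_cores, captureTime). The update methods are not part of the snippet, so the following is only a sketch of that bookkeeping:

import numpy as np

def rolling_update(time_set, new_sample):
    # Drop the oldest sample, append the newest, and return the buffer plus its mean
    time_set = np.roll(time_set, -1)
    time_set[-1] = new_sample
    return time_set, float(np.mean(time_set))

captureTimeSet = np.zeros(4)
captureTimeSet, captureTime = rolling_update(captureTimeSet, 33.0)  # ms for one capture

# Dispatch period as described above: never faster than the capture time
processTime, processing_cores = 80.0, 3
runTime = max(processTime / processing_cores, captureTime)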
Example No. 9
	def __init__(self):

		#The number of cores to be used while processing image data
		#This number may be less than the actual number of cores on the CPU depending on the user's specifications
		#cores = min(desiredCores, multiprocessing.cpu_count()) //don't allow more cores than the CPU has available and don't run more than the user wants
		desired_cores = VN_config.get_integer('processing', 'desired_cores', 4)
		available_cores = min(desired_cores, multiprocessing.cpu_count())
		#check if a core is already in use for background image capture
		cores_image_capture = int(VN_config.get_boolean('processing','background_capture', True))
		self.cores_processing = max(available_cores - cores_image_capture,1)


		#The time (in millis) it takes to capture an image
		#Frame rate = 1000/captureTime
		#****On some cameras frame rate is dependent on camera exposure level
		#****i.e. dark = slower fps and light = fast frame rate
		#****Capture time is dependent on available CPU
		#This time will be dynamically calculated using a rolling average
		self.captureTime = 0 
		self.captureTimeSet = np.zeros(4)

		#The time(in millis) it takes to process an image
		#****Process time is dependent on available CPU and the image
		#This time will be dynamically calculated using a rolling average
		self.processTime = 0
		self.processTimeSet = np.zeros(4)

		#How often an image is dispatched to be processed (in millis)
		#Determined by splitting up processTime among available number of cores
		#*****This will not be smaller than captureTime because we won't allow it to process frames more often than capturing frames; it will cause sync issues with Pipe()
		#*****If the CPU is slow and has limited cores, we may process every other frame or every nth frame!!!
		#runTime = max(processTime/processing_cores,captureTime) 
		self.runTime = 0

		#set up a pipe to pass info between background processes and dispatcher
		self.parent_conn, self.child_conn = multiprocessing.Pipe()

		#last time we started to process an image
		self.lastDispatch = 0

		#last time an image process completed
		self.lastRetreival = 0
Example No. 10
    def __init__(self):

        # get which camera we will use
        self.camera_index = VN_config.get_integer('camera', 'index', 0)

        # get image resolution
        self.img_width = VN_config.get_integer('camera', 'width', 640)
        self.img_height = VN_config.get_integer('camera', 'height', 480)

        # get image center
        self.img_center_x = self.img_width / 2
        self.img_center_y = self.img_height / 2

        # define field of view
        self.cam_hfov = VN_config.get_float('camera', 'horizontal-fov', 70.42)
        self.cam_vfov = VN_config.get_float('camera', 'vertical-fov', 43.3)

        #get camera distortion matrix and intrinsics. Defaults: logitech c920
        mtx = np.array([[614.01269552, 0, 315.00073982],
                        [0, 614.43556296, 237.14926858], [0, 0, 1.0]])
        dist = np.array(
            [0.12269303, -0.26618881, 0.00129035, 0.00081791, 0.17005303])

        self.matrix = VN_config.get_array('camera', 'matrix', mtx)
        self.distortion = VN_config.get_array('camera', 'distortion', dist)

        self.newcameramtx, self.roi = cv2.getOptimalNewCameraMatrix(
            self.matrix, self.distortion, (self.img_width, self.img_height), 1,
            (self.img_width, self.img_height))

        #create a camera object
        self.camera = None

        #number of cores available for use
        desiredCores = VN_config.get_integer('processing', 'desired_cores', 4)
        self.cores_available = min(desiredCores, multiprocessing.cpu_count())

        #does the user want to capture images in the background
        self.background_capture = VN_config.get_boolean(
            'processing', 'background_capture', True)

        # background image processing variables
        self.proc = None  # background process object
        self.parent_conn = None  # parent end of communication pipe
        self.img_counter = 0  # num images requested so far
        self.is_backgroundCap = False  #state variable for background capture
Example No. 11
    def __init__(self):
        # add variable initialisation here
        self.api = None
        self.vehicle = None

        self.last_mode_call = 0
        self.last_mode_state = 'STABILIZE'
        self.mode_update_rate = VN_config.get_float('vehicle control',
                                                    'mode_update_rate', 0.75)

        self.last_home_call = 0
        self.last_home = None
        self.home_update_rate = VN_config.get_float('vehicle control',
                                                    'home_update_rate', 10)

        self.last_set_velocity = 0
        self.vel_update_rate = VN_config.get_float('vehicle control',
                                                   'vel_update_rate', 0.1)

        self.last_report_landing_target = 0
        self.landing_update_rate = VN_config.get_float('vehicle control',
                                                       'landing_update_rate',
                                                       0.02)
Example No. 12
    def __init__(self):
        # create sub directory for logs and videos
        self.location = VN_config.get_string("logging", "location", os.environ["HOME"] + "/visnav/")

        path = self.location + "logs"
        if not os.path.exists(path):
            os.makedirs(path)
        path = self.location + "vids"
        if not os.path.exists(path):
            os.makedirs(path)

        # set default strategy name
        self.strat_name = "Smart_Camera"

        # get image resolution
        self.img_width = VN_config.get_integer("camera", "width", 640)
        self.img_height = VN_config.get_integer("camera", "height", 480)

        #####TEXT LOGGING######
        # levels = 'debug' , 'general' , 'aircraft' , 'algorithm' , 'performance'
        # multiple message levels can be selected by concatenating strings i.e. 'debug, aircraft'
        # what type of messages we print to the terminal
        self.print_level = VN_config.get_string("logging", "print_level", "debug, general")
        # what type of messages we log to a file
        self.log_level = VN_config.get_string("logging", "log_level", "aircraft, algorithm, general")

        #####VIDEO RECORDING######
        # levels = 'frame' , 'gui'
        # multiple message levels can be selected at once by concatenating strings i.e. 'frame, gui'
        # what type of images we display on the screen
        self.display_level = VN_config.get_string("logging", "display_level", "raw, gui")
        # what type of images we record
        self.record_level = VN_config.get_string("logging", "record_level", "raw")
        # Write a video or individual images
        self.record_type = VN_config.get_string("logging", "record_type", "image")

        # Note about useful logging practices:
        # In order to replay a flight through the program, log_level must include 'aircraft' and 'algorithm'
        # record level must include 'frame'
        # all other logging levels are for user diagnostics

        # a list of video writers and their tag
        self.video_writers = []

        # text logger
        self.logger = None
Example No. 13
	def __init__(self):

		#load config file
		VN_config.get_file('Smart_Camera')

		#get camera specs
		self.camera_index = VN_config.get_integer('camera','camera_index',0)
		self.camera_width = VN_config.get_integer('camera', 'camera_width', 640)
		self.camera_height = VN_config.get_integer('camera', 'camera_height', 480)
		self.camera_hfov = VN_config.get_float('camera', 'horizontal-fov', 72.42)
		self.camera_vfov = VN_config.get_float('camera', 'vertical-fov', 43.3)

		#use simulator
		self.simulator = VN_config.get_boolean('simulator','use_simulator',True)

		#Run the program no matter what mode or location; Useful for debug purposes
		self.always_run = VN_config.get_boolean('general', 'always_run', True)

		#how many frames have been captured
		self.frame_count = 0

		#debugging: 
		self.kill_camera = False
Example No. 14
	def __init__(self):
		#create sub directory for logs and videos
		self.location = VN_config.get_string('logging','location','../../visnav/')

		path = self.location + 'logs'
		if not os.path.exists(path):
		    os.makedirs(path)
		path = self.location + 'vids'
		if not os.path.exists(path):
		    os.makedirs(path)

		#set default strategy name
		self.strat_name = 'Smart_Camera'
		
		# get image resolution
		self.img_width = VN_config.get_integer('camera','width',640)
		self.img_height = VN_config.get_integer('camera','height',480)

		#####TEXT LOGGING######
		#levels = 'debug' , 'general' , 'aircraft' , 'algorithm' , 'performance'
		#multiple message levels can be selected by concatenating strings i.e. 'debug, aircraft'
		#what type of messages we print to the terminal
		self.print_level = VN_config.get_string('logging','print_level','debug, general')
		#what type of messages we log to a file
		self.log_level = VN_config.get_string('logging','log_level','aircraft , algorithm, general')


		#####VIDEO RECORDING######
		#levels = 'frame' , 'gui'
		#multiple message levels can be selected at once by concatenating strings i.e. 'frame, gui'
		#what type of images we display on the screen
		self.display_level = VN_config.get_string('logging', 'display_level', 'raw, gui')
		#what type of images we record
		self.record_level = VN_config.get_string('logging', 'record_level', 'raw')

		#Note about useful logging practices:
			#In order to replay a flight through the program, log_level must include 'aircraft' and 'algorithm'
			#record level must include 'frame'
			#all other logging levels are for user diagnostics

		#a list of video writers and their tag
		self.video_writers = []

		#text logger
		self.logger = None
Example No. 15
	def __init__(self):
		self.targetLocation = PositionVector()
		self.vehicleLocation = PositionVector()

		self.backgroundColor = (74,88,109)


		#load target
		filename = VN_config.get_string('simulator', 'target_location', '../../visnav/target.jpg')
		target_size = VN_config.get_float('algorithm', 'outer_ring', 1.0)
		self.load_target(filename,target_size)


		#define camera
		self.camera_width = VN_config.get_integer('camera', 'width', 640)
		self.camera_height = VN_config.get_integer('camera', 'height', 640)
		self.camera_vfov = VN_config.get_float('camera', 'vertical-fov',72.42 )
		self.camera_hfov = VN_config.get_float('camera', 'horizontal-fov', 72.42)
		self.camera_fov = math.sqrt(self.camera_vfov**2 + self.camera_hfov**2)
		self.camera_frameRate = 30
Example No. 16
    def __init__(self):
        self.targetLocation = PositionVector()
        self.vehicleLocation = PositionVector()

        self.backgroundColor = (74, 88, 109)

        #load target
        filename = VN_config.get_string('simulator', 'target_location',
                                        '../../visnav/target.jpg')
        target_size = VN_config.get_float('algorithm', 'outer_ring', 1.0)
        self.load_target(filename, target_size)

        #define camera
        self.camera_width = VN_config.get_integer('camera', 'width', 640)
        self.camera_height = VN_config.get_integer('camera', 'height', 640)
        self.camera_vfov = VN_config.get_float('camera', 'vertical-fov', 72.42)
        self.camera_hfov = VN_config.get_float('camera', 'horizontal-fov',
                                               72.42)
        self.camera_fov = math.sqrt(self.camera_vfov**2 + self.camera_hfov**2)
        self.camera_frameRate = 30
Example No. 17
    def __init__(self):
        #load algorithm constants
        #how round a circle needs to be. Perfect circle = 1
        self.eccentricity = VN_config.get_float('algorithm', 'eccentricity',
                                                0.6)
        #acceptable distance (pixels) between concentric circle centers
        self.distance_threshold = VN_config.get_integer(
            'algorithm', 'distance_threshold', 15)
        # number of circles needed for a valid target (times 2); 2 circles are often overlaid
        self.min_circles = VN_config.get_integer('algorithm', 'min_circles', 5)
        #pixels: used to identify repeat circles(stacked circles). Problem caused by findContours()
        self.radius_tolerance = VN_config.get_integer('algorithm',
                                                      'radius_tolerance', 2)
        #Tolerance used in comparing actual ratios and perceived ratios
        self.ratio_tolerance = VN_config.get_float('algorithm',
                                                   'ratio_tolerance', 0.015)

        #target specific data
        #target_code is the unique ratio between rings
        target_code_def = np.array([0.8, 0.91, 0.76, 0.84, 0.7, 0.66, 0.49])
        self.target_code = VN_config.get_array('algorithm', 'target_code',
                                               target_code_def)
        #the outer_ring is a scaling factor for targets of various sizes; radius of outer ring in meters
        self.outer_ring = VN_config.get_float('algorithm', 'outer_ring',
                                              0.08255)

        #define field of view
        self.cam_hfov = VN_config.get_float('camera', 'horizontal-fov', 70.42)
        self.cam_vfov = VN_config.get_float('camera', 'vertical-fov', 43.3)

        #define camera size
        self.cam_width = VN_config.get_integer('camera', 'width', 640)
        self.cam_height = VN_config.get_integer('camera', 'height', 480)
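The constructor above only loads the detection constants; the matching step itself is not shown. As an assumption about how target_code and ratio_tolerance fit together, a measured ring ratio could be accepted when it falls within the tolerance of any coded ratio:

import numpy as np

def matches_target_code(measured_ratio, target_code, ratio_tolerance):
    # Hypothetical check: does the measured ratio sit within tolerance of any coded ring ratio?
    return bool(np.any(np.abs(target_code - measured_ratio) <= ratio_tolerance))

target_code = np.array([0.8, 0.91, 0.76, 0.84, 0.7, 0.66, 0.49])
print(matches_target_code(0.905, target_code, 0.015))  # True, close to 0.91
print(matches_target_code(0.60, target_code, 0.015))   # False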
Example No. 18
    def __init__(self):

        # load config file
        VN_config.get_file("Smart_Camera")

        # get camera specs
        self.camera_index = VN_config.get_integer("camera", "camera_index", 0)
        self.camera_width = VN_config.get_integer("camera", "camera_width", 640)
        self.camera_height = VN_config.get_integer("camera", "camera_height", 480)
        self.camera_hfov = VN_config.get_float("camera", "horizontal-fov", 72.42)
        self.camera_vfov = VN_config.get_float("camera", "vertical-fov", 43.3)

        # simulator
        self.simulator = VN_config.get_boolean("simulator", "use_simulator", True)
        self.target_file = VN_config.get_string(
            "simulator", "target_location", os.environ["HOME"] + "/visnav/target.jpg"
        )
        self.target_size = VN_config.get_float("algorithm", "outer_ring", 1.0)

        # Run the program no matter what mode or location; Useful for debug purposes
        self.always_run = VN_config.get_boolean("general", "always_run", True)

        # how many frames have been captured
        self.frames_captured = 0

        # debugging:
        self.kill_camera = False
Example No. 19
	def __init__(self):
		#load algorithm constants
		#how round a circle needs to be. Perfect circle = 1
		self.eccentricity = VN_config.get_float('algorithm', 'eccentricity', 0.6)
		#acceptable distance (pixels) between concentric circle centers
		self.distance_threshold = VN_config.get_integer('algorithm','distance_threshold', 15)
		# number of circles needed for a valid target (times 2); 2 circles are often overlaid
		self.min_circles = VN_config.get_integer('algorithm', 'min_circles', 5)
		#pixels: used to identify repeat circles(stacked circles). Problem caused by findContours()
		self.radius_tolerance = VN_config.get_integer('algorithm', 'radius_tolerance', 2)
		#Tolerance used in comparing actual ratios and perceived ratios
		self.ratio_tolerance = VN_config.get_float('algorithm', 'ratio_tolerance', 0.015)


		#target specific data
		#target_code is the unique ratio between rings
		target_code_def = np.array([0.8,0.91,0.76,0.84,0.7,0.66,0.49])
		self.target_code = VN_config.get_array('algorithm', 'target_code',target_code_def)
		#the outer_ring is a scaling factor for targets of various sizes; radius of outer ring in meters
		self.outer_ring = VN_config.get_float('algorithm', 'outer_ring', 0.08255)

		#define field of view
		self.cam_hfov = VN_config.get_float('camera', 'horizontal-fov', 70.42)
		self.cam_vfov = VN_config.get_float('camera', 'vertical-fov', 43.3)

		#define camera size
		self.cam_width = VN_config.get_integer('camera', 'width', 640)
		self.cam_height = VN_config.get_integer('camera', 'height', 480)
Example No. 20
    def __init__(self):

        #load config file
        VN_config.get_file('Smart_Camera')

        #get camera specs
        self.camera_index = VN_config.get_integer('camera', 'camera_index', 0)
        self.camera_width = VN_config.get_integer('camera', 'camera_width',
                                                  640)
        self.camera_height = VN_config.get_integer('camera', 'camera_height',
                                                   480)
        self.camera_hfov = VN_config.get_float('camera', 'horizontal-fov',
                                               72.42)
        self.camera_vfov = VN_config.get_float('camera', 'vertical-fov', 43.3)

        #simulator
        self.simulator = VN_config.get_boolean('simulator', 'use_simulator',
                                               True)
        self.target_file = VN_config.get_string(
            'simulator', 'target_location',
            os.environ['HOME'] + '/visnav/target.jpg')
        self.target_size = VN_config.get_float('algorithm', 'outer_ring', 1.0)

        #Run the program no matter what mode or location; Useful for debug purposes
        self.always_run = VN_config.get_boolean('general', 'always_run', True)

        #how many frames have been captured
        self.frames_captured = 0

        #debugging:
        self.kill_camera = False
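The snippet above only stores the camera index and resolution. If the capture device were opened directly with OpenCV (not shown in the original), the typical pattern would be:

import cv2

camera_index, camera_width, camera_height = 0, 640, 480  # defaults from the example

cap = cv2.VideoCapture(camera_index)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)

ret, frame = cap.read()  # ret is False if no camera is attached
cap.release()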
Example No. 21
#!/usr/bin/python
#SYSTEM IMPORTS
import numpy as np

#COMMON IMPORTS
from Common.VN_config import VN_config

#####ALGORITHM#######

#how round a circle needs to be. Perfect circle = 1
VN_config.set_float('algorithm', 'eccentricity', 0.6)
#acceptable distance (pixels) between concentric circle centers
VN_config.set_integer('algorithm', 'distance_threshold', 15)
# number of circles needed for a valid target (times 2); 2 circles are often overlaid
VN_config.set_integer('algorithm', 'min_circles', 5)
#pixels: used to identify repeat circles (stacked circles). Problem caused by findContours()
VN_config.set_integer('algorithm', 'radius_tolerance', 2)
#Tolerance used in comparing actual ratios and perceived ratios
VN_config.set_float('algorithm', 'ratio_tolerance', 0.015)

#target specific data
#target_code is the unique ratio between rings
target_code_def = np.array([0.8, 0.91, 0.76, 0.84, 0.7, 0.66, 0.49])
VN_config.set_array('algorithm', 'target_code', target_code_def)
#the outer_ring is a scaling factor for targets of various sizes; radius of outer ring in meters
VN_config.set_float('algorithm', 'outer_ring', 0.08255)

####PROCESSING#######

VN_config.set_integer('processing', 'desired_cores', 4)
#check if a core is already in use for background image capture
Example No. 22
#!/usr/bin/python
#SYSTEM IMPORTS
import numpy as np

#COMMON IMPORTS
from Common.VN_config import VN_config



#####ALGORITHM#######

#how round a circle needs to be. Perfect circle = 1
VN_config.set_float('algorithm', 'eccentricity', 0.6)
#acceptable distance (pixels) between concentric circle centers
VN_config.set_integer('algorithm', 'distance_threshold', 15)
# number of circles needed for a valid target (times 2); 2 circles are often overlaid
VN_config.set_integer('algorithm', 'min_circles', 5)
#pixels: used to identify repeat circles (stacked circles). Problem caused by findContours()
VN_config.set_integer('algorithm', 'radius_tolerance', 2)
#Tolerance used in comparing actual ratios and perceived ratios
VN_config.set_float('algorithm', 'ratio_tolerance', 0.015)


#target specific data
#target_code is the unique ratio between rings
target_code_def = np.array([0.8,0.91,0.76,0.84,0.7,0.66,0.49])
VN_config.set_array('algorithm', 'target_code',target_code_def)
#the outer_ring is a scaling factor for targets of various sizes; radius of outer ring in meters
VN_config.set_float('algorithm', 'outer_ring', 0.08255)
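These last two examples write defaults with set_* calls under the same section/key names that the earlier constructors read back with get_*. The real VN_config is not shown here; as a rough behavioral sketch only, the round trip amounts to a keyed store with per-call fallbacks:

store = {}

def set_float(section, key, value):
    # Record a value under (section, key)
    store[(section, key)] = float(value)

def get_float(section, key, default):
    # Return the stored value if present, otherwise the caller's default
    return store.get((section, key), float(default))

set_float('algorithm', 'eccentricity', 0.6)
print(get_float('algorithm', 'eccentricity', 0.9))  # 0.6, the stored value wins
print(get_float('algorithm', 'missing_key', 0.9))   # 0.9, the default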