class Main:
    """Entry point for classic (HSV contour) target recognition."""

    def __init__(self):
        self.results = get_args()
        self.name = self.results.target
        # Check if requested target exists
        if not utils.is_target(self.name):
            return
        # Choose the camera provider requested on the command line
        if self.results.camera == 'pi':
            camera_provider = PICamera()
            logging.info('Using PI Camera provider')
        elif self.results.camera == 'realsense':
            logging.info('Using RealSense camera provider')
            camera_provider = RealSense()
        elif self.results.camera == 'cv':
            camera_provider = CVCamera(self.results.port)
        else:
            logging.error('Invalid camera provider, this shouldn\'t happen')
            sys.exit(1)
        self.display = Display(provider=camera_provider)
        # Local runs get live HSV trackbars; headless runs read the saved HSV file
        if self.results.local:
            self.hsv_handler = Trackbars(self.name)
        else:
            self.hsv_handler = FileHSV(self.name)
        if self.results.web:
            self.web = Web(self)
            self.web.start_thread()  # Run web server
        if self.results.networktables:
            self.nt = nt_handler.NT(self.name)
        self.stop = False

    def change_name(self, name):
        """
        Change the current target and request a restart of the running loop.

        :param name: Name of the new target; ignored if no such target exists.
        """
        if not utils.is_target(name):
            return
        logging.info(f'Changing target to {name}')
        self.name = name
        # Point the HSV handler at the new target's values
        self.hsv_handler.name = name
        self.hsv_handler.reload()
        # Signal the running loop to restart with the new target
        self.stop = True

    def loop(self):
        """
        Run target recognition until the user quits.

        Restarts automatically with the newly selected target whenever
        change_name() sets the stop flag.
        """
        # Iterative restart: the previous implementation re-entered loop()
        # recursively on every target change, growing the call stack without
        # bound over a long run.
        while self._run_target():
            logging.warning('Restarting...')

    def _run_target(self):
        """
        Run a single recognition session for the current target.

        :return: True if a restart was requested (target changed),
                 False if the session should shut down.
        """
        printed = False
        # Check if requested target exists
        if not utils.is_target(self.name, False):
            return False
        logging.info(f'Starting loop with target {self.name}')
        self.stop = False
        # We dynamically load classes in order to provide a modular base
        target = import_module(f'targets.{self.name}').Target(self)
        self.display.change_exposure(target.exposure)
        # Timer for FPS counter
        timer = time.time()
        avg = 0
        while True:
            frame = self.display.get_frame()
            if frame is None:
                # Warn only once per camera outage to avoid log spam
                if not printed:
                    logging.warning('Couldn\'t read from camera')
                    printed = True
                continue
            else:
                printed = False
            # Separate frames for display purposes
            original = frame.copy()
            contour_image = frame.copy()
            # Target functions
            mask = target.create_mask(frame, self.hsv_handler.get_hsv())
            contours, hierarchy = target.find_contours(mask)
            filtered_contours = target.filter_contours(contours, hierarchy)
            # Draw contours
            target.draw_contours(filtered_contours, contour_image)
            # Find distance and angle
            angle, distance = target.measurements(contour_image, filtered_contours)
            # Show FPS
            avg = utils.calculate_fps(contour_image, time.time(), timer, avg)
            timer = time.time()
            # Display
            self.display.process_frame(contour_image, 'contour image', self.results.local)
            self.display.process_frame(utils.bitwise_and(original, mask), 'mask', self.results.local)
            if self.results.networktables:
                # Compare against None so valid zero measurements (e.g. a
                # perfectly centered target with angle 0) are still published.
                if distance is not None:
                    self.nt.set_item('distance', distance)
                if angle is not None:
                    self.nt.set_item('angle', angle)
            if self.stop:
                # Stop signal was sent - restart with the new name
                return True
            k = cv2.waitKey(1) & 0xFF  # large wait time to remove freezing
            if k in (27, 113):  # Esc or 'q'
                logging.warning('Q pressed, stopping...')
                self.display.release()
                return False
class Main:
    """
    The main class for target recognition, which makes use of all other files
    in this directory. When recognising a target, only this file needs to be run.

    Attributes
    ----------
    results
        the arguments returned from the run configuration parameters
        See: get_args()
    name : str
        - the name of the target, as received from self.results
        - if the target doesn't exist, shuts down the code
        See: is_target(name) in utils.py
    display : Display
        - the display that will be used for the current run
        - created according to camera_provider in __init__
    hsv_handler : Trackbars or FileHSV
        - hsv handler, based on what is requested in self.results
        - if the run is to be displayed locally, the handler will show
          trackbars for tuning HSV
        - if the run is not to be displayed locally, the handler will just use
          the HSV file corresponding to self.name
    web : Web
        - streaming handler, if streaming is requested in self.results
    nt : nt_handler.NT
        - networktables handler, if networktables are requested in self.results
    stop : bool
        - a variable checked at the end of each loop, notifies if a shut down
          is requested
        See: loop()
    """

    def __init__(self):
        """
        Create all initial handlers based on parameters from get_args.

        camera_provider : CVCamera or PICamera or RealSense
            - the type of camera to be used by self.display
        """
        self.results = get_args()
        self.name = self.results.target
        # Check if requested target exists
        if not utils.is_target(self.name):
            return
        # Set the camera provider
        if self.results.camera == 'pi':
            camera_provider = PICamera()
            logging.info('Using PI Camera provider')
        elif self.results.camera == 'realsense':
            logging.info('Using RealSense camera provider')
            camera_provider = RealSense()
        elif self.results.camera == 'cv':
            camera_provider = CVCamera(self.results.port)
        else:
            logging.error('Invalid camera provider, this shouldn\'t happen')
            sys.exit(1)
        # Create the display
        self.display = Display(provider=camera_provider)
        if self.results.local:
            self.hsv_handler = Trackbars(self.name)
        else:
            self.hsv_handler = FileHSV(self.name)
        # Create the web server
        if self.results.web:
            self.web = Web(self)
            self.web.start_thread()
        # Create the networktables server
        if self.results.networktables:
            self.nt = nt_handler.NT(self.name)
        self.logger = Logger(self)
        self.stop = False

    def change_name(self, name):
        """
        Changes the name of the target.

        Uses: Generally placed in the web handler, where it receives a string
        from the site. Is called before a new loop.
        See: update() in web.py

        :param name: The name of the new target. If it does not exist, the run
                     is shut down.
        """
        if not utils.is_target(name):
            return
        logging.info(f'Changing target to {name}')
        self.name = name
        # Update the HSV variables to match the new target
        self.hsv_handler.name = name
        self.hsv_handler.reload()
        # Stop the current loop
        self.stop = True

    def loop(self):
        """
        Recognises the target repeatedly. Utilises all handlers initialised
        in __init__.

        :return: If the target doesn't exist, the run is shut down.
        """
        # Iterative restart: the previous implementation re-entered loop()
        # recursively on every target change, growing the call stack without
        # bound over a long run.
        while self._run_target():
            logging.warning('Restarting...')

    def _run_target(self):
        """
        Run a single recognition session for the current target.

        :return: True if a restart was requested (target changed),
                 False if the session should shut down.
        """
        printed = False
        # Check if requested target exists
        if not utils.is_target(self.name, False):
            return False
        logging.info(f'Starting loop with target {self.name}')
        self.stop = False
        # Load the target class from the 'targets' directory
        target = import_module(f'targets.{self.name}').Target(self)
        time.sleep(1)
        # Change camera exposure based on the target
        self.display.change_exposure(target.exposure)
        # Timer for FPS counter
        self.timer = time.time()
        avg = 0
        while True:
            # Get initial frame
            frame = self.display.get_frame()
            # If the frame could not be read, flag it as unreadable to avoid errors
            if frame is None:
                if not printed:
                    logging.warning('Couldn\'t read from camera')
                    printed = True
                continue
            else:
                self.logger.record_latency()
                printed = False
            # Copy the initial frame for analysis and display, respectively
            original = frame.copy()
            contour_image = frame.copy()
            # Show FPS
            avg = utils.calculate_fps(contour_image, time.time(), self.timer, avg)
            self.timer = time.time()
            # Create a mask
            mask = target.create_mask(frame, self.hsv_handler.get_hsv())
            # Get all contours
            contours, hierarchy = target.find_contours(mask)
            self.is_potential_target = bool(contours)
            # Filter contours
            filtered_contours = target.filter_contours(contours, hierarchy)
            self.is_target = bool(filtered_contours)
            # Draw contours
            target.draw_contours(filtered_contours, contour_image)
            self.logger.record_contours()
            # Find distance, angle, and other measurements if stated
            angle, distance, field_angle, additional_data = target.measurements(contour_image, filtered_contours)
            if self.results.web:
                # Stream frame
                self.web.frame = contour_image
            # Display frame
            self.display.process_frame(contour_image, 'image', self.results.local)
            # Display mask
            self.display.process_frame(utils.bitwise_and(original, mask), 'mask', self.results.local)
            # Send measurements to networktables, if requested, and if measurements were returned
            if self.results.networktables:
                if distance is not None:
                    self.nt.set_item('distance', distance)
                if angle is not None:
                    self.nt.set_item('angle', angle)
                if field_angle is not None:
                    self.nt.set_item('field_angle', field_angle)
                # TODO: Send additional data
            if self.stop:
                # Stop signal was sent - restart with the new name
                return True
            # Stop the code if q is pressed
            k = cv2.waitKey(1) & 0xFF  # Large wait time to remove freezing
            if k in (27, 113):  # Esc or 'q'
                logging.warning('Q pressed, stopping...')
                # Release the camera and close all windows
                self.display.release()
                return False
class Main:
    """
    2019-season main class: runs classic reflective-tape detection every
    frame, and additionally neural hatch detection and simple (HSV) cargo
    detection depending on the target type read from networktables.
    """

    def __init__(self):
        self.name = 'hatch'  # Neural target
        self.results = get_args()
        self.realsense = RealSense(constants.REALSENSE_SN)
        self.camera_provider = self.realsense
        time.sleep(1)  # Let realsense run a bit before setting exposure
        self.realsense.set_exposure(10)
        # Choose the camera provider requested on the command line
        if self.results.camera == 'pi':
            camera_provider = PICamera()
            logging.info('Using PI Camera provider')
        elif self.results.camera == 'realsense':
            logging.info('Using RealSense camera provider')
            camera_provider = self.realsense
        elif self.results.camera == 'cv':
            camera_provider = CVCamera(self.results.port)
        else:
            logging.error('Invalid camera provider, this shouldn\'t happen')
            sys.exit(1)
        self.display = Display(provider=camera_provider)
        # Local runs get live trackbars for cargo tuning; tape HSV always
        # comes from file
        if self.results.local:
            # self.tape_hsv_handler = Trackbars('2019_tape')
            self.cargo_hsv_handler = Trackbars('cargo_simple')
            self.tape_hsv_handler = FileHSV('2019_tape')
        else:
            self.tape_hsv_handler = FileHSV('2019_tape')
            self.cargo_hsv_handler = FileHSV('cargo_simple')
        if self.results.web:
            self.web = Web(self)
            self.web.start_thread()  # Run web server
        if self.results.networktables:
            self.nt = nt_handler.NT('2019')
        self.stop = False

    def loop(self):
        """
        Run the combined 2019 detection loop until the user quits.

        Reads 'target_type' and 'game_piece' from networktables each frame to
        decide which detection pipelines to run, and publishes tape/hatch/cargo
        measurements back to networktables.
        """
        printed = False
        logging.info('Starting loop')
        self.stop = False
        # Load targets
        logging.info('Loading targets......')
        tape = import_module('targets.2019_tape').Target(self)
        cargo_simple = import_module('targets.cargo_simple').Target(self)
        # hatch = import_module('neural_targets.hatch').Target(self)
        logging.info('Loading targets complete')
        # Timer for FPS counter
        timer = time.time()
        avg = 0
        while True:
            frame = self.realsense.frame
            if frame is None:
                # Warn only once per camera outage to avoid log spam
                if not printed:
                    logging.warning('Couldn\'t read from camera')
                    printed = True
                continue
            else:
                printed = False
            # Separate frames for display purposes
            original = frame.copy()
            contour_image = frame.copy()
            # ---- Classic detection
            mask = tape.create_mask(frame, self.tape_hsv_handler.get_hsv())
            contours, hierarchy = tape.find_contours(mask)
            filtered_contours = tape.filter_contours(contours, hierarchy)
            # Draw contours
            tape.draw_contours(filtered_contours, contour_image)
            # Read the requested target configuration from networktables
            rocket = self.results.networktables and self.nt.get_item(
                'target_type', 'rocket') == 'rocket'
            rocket_hatch = rocket and self.nt.get_item('game_piece', 'hatch') == 'hatch'
            rocket_cargo = rocket and self.nt.get_item('game_piece', 'hatch') == 'cargo'
            # Find distance and angle
            tape_angle, tape_distance, tape_field_angle, data = tape.measurements(
                contour_image, filtered_contours, rocket_hatch, rocket_cargo, False)
            pair = data[0] if data else None
            hatch_angle = None
            hatch_distance = None
            # Cargo ship
            neural = self.results.networktables and self.nt.get_item(
                'target_type', 'rocket') in ['cargoship', 'floor_hatch']
            if neural:
                # ---- Neural detection
                # NOTE(review): the 'hatch' neural target import above is
                # commented out, so this branch raises NameError if taken -
                # confirm whether neural detection is meant to be disabled.
                boxes = hatch.boxes(original)
                # Draw on frame
                hatch.draw(contour_image, boxes)
                target_type = self.nt.get_item('target_type', 'rocket')
                pairs = data[1] if data else None
                if target_type == 'cargoship':
                    hatch_pairs, non_hatch_pairs = self.match_hatches(
                        pairs, boxes, frame, contour_image)
                    if self.nt.get_item('game_piece', 'hatch') == 'hatch':
                        chosen_pair = tape.get_closest_pair(non_hatch_pairs)
                    else:
                        non_cargo_pairs = self.match_cargos(
                            cargo_simple, pairs, frame, boxes)
                        chosen_pair = tape.get_closest_pair(non_cargo_pairs)
                    tape_angle, tape_distance, tape_field_angle = tape.calculate(
                        chosen_pair, original)
                    if chosen_pair:
                        # Draw chosen pair on screen
                        x, y, w, h = cv2.boundingRect(chosen_pair[0])
                        x2, y2, w2, h2 = cv2.boundingRect(chosen_pair[1])
                        cv2.rectangle(contour_image, (x + w, y + h), (x2, y2),
                                      (123, 92, 249), 3)
                else:
                    # floor_hatch: measure the neural detection directly
                    hatch_angle, hatch_distance, bounding_box = hatch.measurements(
                        frame, boxes)
            else:
                if pair:
                    tape_angle, tape_distance, tape_field_angle = tape.calculate(
                        pair, original)
            cargo_angle = None
            cargo_distance = None
            if self.results.networktables and self.nt.get_item(
                    'target_type', 'sandstorm') == 'floor_cargo':
                cargo_angle, cargo_distance, field_angle, additional_data = self.simple_cargo_detection(
                    cargo_simple, frame, contour_image)
            # Show FPS
            avg = utils.calculate_fps(contour_image, time.time(), timer, avg)
            timer = time.time()
            # Stream the frame only when the web server was created in
            # __init__; previously this was unconditional and raised
            # AttributeError when running without --web.
            if self.results.web:
                self.web.frame = contour_image
            # Display
            self.display.process_frame(contour_image, 'Image', self.results.local)
            self.display.process_frame(utils.bitwise_and(original, mask),
                                       'Reflector mask', self.results.local)
            # Compensation for cameras not being in the center
            if tape_distance and tape_angle is not None:
                tape_distance, tape_angle = self.compensate_center(
                    tape_distance, tape_angle)
            # Set values in network tables
            if self.results.networktables:
                # Tape
                self.nt.set_item('tape_seen', tape_angle is not None)
                if tape_angle is not None:
                    self.nt.set_item('tape_angle', tape_angle)
                if tape_distance:
                    self.nt.set_item('tape_distance', tape_distance)
                if tape_field_angle is not None:
                    self.nt.set_item('tape_field_angle', tape_field_angle)
                # Hatch
                self.nt.set_item('hatch_seen', bool(hatch_distance))
                if hatch_distance:
                    self.nt.set_item('hatch_angle', hatch_angle)
                    self.nt.set_item('hatch_distance', hatch_distance)
                # Cargo
                self.nt.set_item('cargo_seen', bool(cargo_distance))
                if cargo_distance:
                    self.nt.set_item('cargo_angle', cargo_angle)
                    self.nt.set_item('cargo_distance', cargo_distance)
            if self.stop:
                # If stop signal was sent we call loop again to start with new name
                logging.warning('Restarting...')
                self.loop()
                break
            k = cv2.waitKey(1) & 0xFF  # large wait time to remove freezing
            if k in (27, 113):  # Esc or 'q'
                logging.warning('Q pressed, stopping...')
                self.display.release()
                break

    def compensate_center(self, distance, center_angle):
        """
        Compensate for cameras being offset on the genesis profile.

        :param distance: Original distance
        :param center_angle: Center angle before compensation
        :return: Distance and angle after compensation
        """
        try:
            # The hatch-side camera sits on the opposite side of center,
            # so its offset is applied with the opposite sign
            angle_adjustment = 1
            if self.display.camera_provider.name == 'hatch':
                angle_adjustment = -1
            # Law of cosines: distance from robot center to the target
            compensated_distance = math.sqrt(
                distance**2 + constants.CAMERA_DISTANCE_FROM_CENTER**2 +
                2 * distance * angle_adjustment *
                constants.CAMERA_DISTANCE_FROM_CENTER *
                math.sin(math.radians(center_angle)))
            compensated_angle = math.asin(
                (distance * math.sin(math.radians(center_angle)) +
                 constants.CAMERA_DISTANCE_FROM_CENTER * angle_adjustment) /
                (compensated_distance))
            return compensated_distance, math.degrees(compensated_angle)
        except ValueError:
            # math.asin raises ValueError when its argument leaves [-1, 1];
            # fall back to the uncompensated values
            logging.warning('ValueError during compensation')
            return distance, center_angle

    def simple_cargo_detection(self, cargo_simple, frame, contour_image):
        """
        Runs simple cargo detection.

        :param cargo_simple: Cargo simple target
        :param frame: Frame to run detection on
        :param contour_image: Contour image to draw on
        :return: Measurements from detection
        """
        cargo_mask = cargo_simple.create_mask(frame, self.cargo_hsv_handler.get_hsv())
        contours, hierarchy = cargo_simple.find_contours(cargo_mask)
        filtered_contours = cargo_simple.filter_contours(contours, hierarchy)
        # Draw contours
        cargo_simple.draw_contours(filtered_contours, contour_image)
        return cargo_simple.measurements(contour_image, filtered_contours)

    @staticmethod
    def match_hatches(pairs, boxes, frame, contour_image):
        """
        Match hatches to hatch pairs and non hatch pairs.

        :param pairs: Reflection tape pairs
        :param boxes: Bounding boxes
        :param frame: Original frame
        :param contour_image: Frame to draw on
        :return: (hatch_pairs, non_hatch_pairs) tuple of lists
        """
        hatch_pairs = []
        non_hatch_pairs = []
        if pairs:
            for pair in pairs:
                if pair:
                    has_hatch = False
                    for box in boxes:
                        bounding_box = box['box']
                        hatch_x1, hatch_x2, hatch_y1, hatch_y2 = utils.bounding_box_coords(
                            bounding_box, frame)
                        middle_hatch_x = (hatch_x1 + hatch_x2) / 2
                        middle_hatch_y = (hatch_y1 - hatch_y2) / 2
                        reflector_x1, reflector_y1, reflector_w1, reflector_h1 = cv2.boundingRect(
                            pair[0])
                        reflector_x2, reflector_y2, reflector_w2, reflector_h2 = cv2.boundingRect(
                            pair[1])
                        reflector_x2 = reflector_x2 + reflector_w2
                        # Vertical band in which a hatch is considered attached
                        # to this bay (up to 3 reflector heights above the tape)
                        bay_highest_y = max(reflector_y1, reflector_y2)
                        bay_lowest_y = min(reflector_y1 - (3 * reflector_h1),
                                           reflector_y2 - (3 * reflector_h2))
                        if reflector_x2 < middle_hatch_x < reflector_x1 and \
                                bay_lowest_y < middle_hatch_y < bay_highest_y:
                            cv2.rectangle(contour_image,
                                          (int(reflector_x2 - reflector_w1), int(reflector_y2)),
                                          (int(hatch_x2), int(hatch_y2)),
                                          (10, 50, 200), 3)
                            has_hatch = True
                    if has_hatch:
                        hatch_pairs.append(pair)
                    else:
                        non_hatch_pairs.append(pair)
        return hatch_pairs, non_hatch_pairs

    def match_cargos(self, cargo_simple, pairs, frame, boxes):
        """
        Matches cargo and non cargo pairs.

        :param cargo_simple: Cargo simple target
        :param pairs: Reflection tape pairs
        :param frame: Frame
        :param boxes: Hatch bounding boxes
        :return: Non cargo pairs
        """
        non_cargo_pairs = []
        simple_mask = cargo_simple.create_mask(
            frame, self.cargo_hsv_handler.get_hsv())
        cargo_contours, hierarchy = cargo_simple.find_contours(simple_mask)
        for box in boxes:
            bounding_box = box['box']
            hatch_x1, hatch_x2, hatch_y1, hatch_y2 = utils.bounding_box_coords(
                bounding_box, frame)
            found = False
            # Does any sufficiently large cargo contour sit inside this box?
            for cargo in cargo_contours:
                if cv2.contourArea(cargo) > 25:
                    cargo_x, cargo_y, cargo_w, cargo_h = cv2.boundingRect(cargo)
                    middle_cargo_x = (cargo_x + (cargo_w / 2))
                    middle_cargo_y = (cargo_y + (cargo_h / 2))
                    if hatch_x1 < middle_cargo_x < hatch_x2 and \
                            hatch_y1 < middle_cargo_y < hatch_y2:
                        found = True
            if not found:
                # Find matching pair
                if pairs:
                    for pair in pairs:
                        if not pair:
                            continue
                        middle_hatch_x = (hatch_x1 + hatch_x2) / 2
                        middle_hatch_y = (hatch_y1 - hatch_y2) / 2
                        reflector_x1, reflector_y1, reflector_w1, reflector_h1 = cv2.boundingRect(
                            pair[0])
                        reflector_x2, reflector_y2, reflector_w2, reflector_h2 = cv2.boundingRect(
                            pair[1])
                        reflector_x2 = reflector_x2 + reflector_w2
                        bay_highest_y = max(reflector_y1, reflector_y2)
                        bay_lowest_y = min(reflector_y1 - (3 * reflector_h1),
                                           reflector_y2 - (3 * reflector_h2))
                        if reflector_x2 < middle_hatch_x < reflector_x1 and \
                                bay_lowest_y < middle_hatch_y < bay_highest_y:
                            non_cargo_pairs.append(pair)
                            break
        return non_cargo_pairs
class Main:
    """Entry point for neural-network (bounding box) target recognition."""

    def __init__(self):
        self.results = get_args()
        self.name = self.results.target
        # Check if requested target exists
        if not utils.is_target(self.name, neural=True):
            return
        # Choose the camera provider requested on the command line
        if self.results.camera == 'pi':
            camera_provider = PICamera()
            logging.info('Using PI Camera provider')
        elif self.results.camera == 'realsense':
            logging.info('Using RealSense camera provider')
            camera_provider = RealSense()
        elif self.results.camera == 'cv':
            camera_provider = CVCamera(self.results.port)
        else:
            logging.error('Invalid camera provider, this shouldn\'t happen')
            sys.exit(1)
        self.display = Display(provider=camera_provider)
        if self.results.web:
            self.web = Web(self)
            self.web.start_thread()  # Run web server
        if self.results.networktables:
            self.nt = nt_handler.NT(self.name)
        self.stop = False
        # Inference session; presumably assigned by the neural target when it
        # loads its model - TODO confirm against neural_targets
        self.session = None

    def change_name(self, name):
        """
        Change the current target and request a restart of the running loop.

        :param name: Name of the new neural target; ignored if it doesn't exist.
        """
        if not utils.is_target(name, neural=True):
            return
        logging.info('Changing target to {}'.format(name))
        self.name = name
        # Signal the running loop to restart with the new target
        self.stop = True

    def loop(self):
        """
        Run neural target recognition until the user quits.

        Restarts automatically with the newly selected target whenever
        change_name() sets the stop flag.
        """
        # Iterative restart: the previous implementation re-entered loop()
        # recursively on every target change, growing the call stack without
        # bound over a long run.
        while self._run_target():
            logging.warning('Restarting...')

    def _run_target(self):
        """
        Run a single recognition session for the current target.

        :return: True if a restart was requested (target changed),
                 False if the session should shut down.
        """
        printed = False
        # Check if requested target exists
        if not utils.is_target(self.name, False, True):
            return False
        logging.info('Starting loop with target {}'.format(self.name))
        self.stop = False
        # We dynamically load classes in order to provide a modular base
        target = import_module('neural_targets.{}'.format(self.name)).Target(self)
        self.display.change_exposure(target.exposure)
        # Timer for FPS counter
        timer = time.time()
        avg = 0
        while True:
            frame = self.display.get_frame()
            if frame is None:
                # Warn only once per camera outage to avoid log spam
                if not printed:
                    logging.warning('Couldn\'t read from camera')
                    printed = True
                continue
            else:
                printed = False
            # Get bounding boxes
            boxes = target.boxes(frame)
            # Draw on frame
            target.draw(frame, boxes)
            angle, distance, bounding_box = target.measurements(frame, boxes)
            # Show FPS
            avg = utils.calculate_fps(frame, time.time(), timer, avg)
            timer = time.time()
            # Stream the frame only when the web server was created in
            # __init__; previously this was unconditional and raised
            # AttributeError when running without --web.
            if self.results.web:
                self.web.frame = frame
            # Display
            self.display.process_frame(frame, 'image', self.results.local)
            if self.results.networktables:
                # Compare against None so valid zero measurements (e.g. a
                # perfectly centered target with angle 0) are still published.
                if distance is not None:
                    self.nt.set_item('distance', distance)
                if angle is not None:
                    self.nt.set_item('angle', angle)
            if self.stop:
                # Stop signal was sent - restart with the new name
                return True
            k = cv2.waitKey(1) & 0xFF  # large wait time to remove freezing
            if k in (27, 113):  # Esc or 'q'
                logging.warning('Q pressed, stopping...')
                self.display.release()
                # Guard: self.session stays None unless a target assigned one;
                # closing unconditionally crashed on quit in that case.
                if self.session is not None:
                    self.session.close()
                return False