def test_for_four(self):
    four_allowed = 1 - numpy.clip(cv2.imread("Resources/four_allowed.png", cv2.IMREAD_GRAYSCALE), 0, 1)
    contours = ImagePreprocessor.find_contours(four_allowed)
    number_of_recognized_signs = ImagePreprocessor.calculate_number_of_signs(four_allowed, contours)
    self.assertEqual(4, number_of_recognized_signs)
def test_if_odd(self):
    odd_allowed = 1 - numpy.clip(cv2.imread("Resources/odd.png", cv2.IMREAD_GRAYSCALE), 0, 1)
    contours = ImagePreprocessor.find_contours(odd_allowed)
    number_of_recognized_signs = ImagePreprocessor.calculate_number_of_signs(odd_allowed, contours)
    number_of_recognized_signs = number_of_recognized_signs % 2
    self.assertEqual(1, number_of_recognized_signs)
def test_extract_lane_shape(self):
    data_in = numpy.clip(cv2.imread("Resources/Lane_Fork_Noise.png", cv2.IMREAD_GRAYSCALE), 0, 1)
    contours = ImagePreprocessor.find_contours(data_in)
    data_out = ImagePreprocessor.extract_lane_shape(data_in, contours)
    expected = numpy.clip(cv2.imread("Resources/Lane_Fork_Clean.png", cv2.IMREAD_GRAYSCALE), 0, 1)
    numpy.testing.assert_array_equal(data_out, expected)
def process_frame(self, image):
    """
    Processes the frame captured by Cozmo's camera.

    :param image: Current frame from Cozmo's feed
    :type image: PIL image
    """
    # Convert image to binary
    bin_img = ImagePreprocessor.pil_rgb_to_numpy_binary(image)

    # Find contours on the image
    contours = ImagePreprocessor.find_contours(bin_img)

    # Extract lane shape and remove noise
    lane_img = ImagePreprocessor.extract_lane_shape(bin_img, contours)

    # Create image for later display
    display_img = cv2.cvtColor(lane_img * 255, cv2.COLOR_GRAY2BGR)

    # Count signs and overwrite the attribute in the lane analyzer
    if RobotStatusController.enable_sign_recognition and \
            not Settings.disable_sign_detection and \
            not RobotStatusController.disable_autonomous_behavior:
        RobotStatusController.sign_count = ImagePreprocessor.calculate_number_of_signs(
            display_img, contours)
        self.sign_handler.react_to_signs(RobotStatusController.sign_count)

    lane_correction = 0
    if not RobotStatusController.disable_autonomous_behavior:
        # Calculate lane correction based on image data
        lane_correction = self.corr_calculator.calculate_lane_correction(lane_img)

    if not RobotStatusController.is_in_packet_station:
        crossing_type = CrossingTypeIdentifier.analyze_frame(lane_img)
        if crossing_type is not None:
            Navigator.navigate()

    if not RobotStatusController.disable_autonomous_behavior:
        # If a correction is required, let Cozmo correct
        if lane_correction is not None:
            self.drive_controller.correct(lane_correction)

    # Update the current frame (display_img is already scaled to 0-255,
    # so it must not be multiplied by 255 again)
    self.current_cam_frame = display_img

    # Show cam live preview if enabled
    if Settings.show_live_preview:
        # self.preview_utils.show_cam_frame(bin_img * 255)
        self.preview_utils.show_cam_frame(display_img)
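The tests above build their binary inputs as 1 - numpy.clip(gray, 0, 1), i.e. dark lane pixels become 1 and the white background becomes 0. The following is a minimal sketch of a PIL-to-binary conversion under that same assumption; the name, threshold value, and exact thresholding of the project's real pil_rgb_to_numpy_binary are not shown here and may differ.

import numpy
import cv2

def pil_rgb_to_numpy_binary_sketch(pil_image, threshold=128):
    # Hypothetical sketch, not the project's actual implementation.
    # Convert the PIL RGB image to a grayscale numpy array
    gray = cv2.cvtColor(numpy.array(pil_image), cv2.COLOR_RGB2GRAY)
    # Pixels darker than the threshold become 1, everything else 0
    return (gray < threshold).astype(numpy.uint8)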
def create_row_patterns(img, step=10):
    """
    Extracts pixel rows from the image.

    :param img: Source image
    :type img: binary numpy array
    :param step: Pixel distance between each sampled row
    :return: An array of row patterns from top to bottom
    """
    h, w = img.shape
    row_patterns = []
    for i in range(0, h, step):
        rle_data = ImagePreprocessor.run_length_encoding(img[i])
        detailed_pattern = ImagePreprocessor.cleanup_row_noise(rle_data)
        row_patterns.append(detailed_pattern[:, 1])
    return row_patterns
def analyze_frame(image):
    """
    Analyzes a frame to check if it contains a crossing. A crossing type
    needs to stay unchanged for 2 frames for it to be confirmed as valid
    and returned.

    :param image: The image as captured by Cozmo's camera
    :return: The last confirmed crossing type
    """
    correction_calculator_obj = InstanceManager.get_instance("CorrectionCalculator")

    # If the lane correction is too large, the crossing may be invalid and should be discarded
    correction_points = correction_calculator_obj.last_points
    if correction_points is None or correction_points[0] is None or \
            correction_points[0][0] < Settings.crossing_correction_min_dist_to_edge or \
            correction_points[0][0] > image.shape[1] - Settings.crossing_correction_min_dist_to_edge:
        CrossingTypeIdentifier.last_confirmed_crossing_type = None
        return CrossingTypeIdentifier.last_confirmed_crossing_type

    # Crop out the relevant area
    image = ImagePreprocessor.crop_image(image,
                                         Settings.crossing_top_crop,
                                         Settings.crossing_right_crop,
                                         Settings.crossing_bottom_crop,
                                         Settings.crossing_left_crop)

    # Obtain pixel rows from the lane shape
    row_patterns = CrossingTypeIdentifier.create_row_patterns(image)

    # Filter out invalid patterns
    row_patterns = CrossingTypeIdentifier.filter_invalid_row_pattern(row_patterns)

    # Determine the crossing type shown in the preview window
    crossing_type = CrossingTypeIdentifier.row_patterns_to_crossing_type(row_patterns)

    # Confirm the crossing type only if it was seen on at least two consecutive frames
    if crossing_type == CrossingTypeIdentifier.last_crossing_type:
        CrossingTypeIdentifier.last_confirmed_crossing_type = crossing_type
    else:
        CrossingTypeIdentifier.last_confirmed_crossing_type = None

    CrossingTypeIdentifier.last_crossing_type = crossing_type
    return CrossingTypeIdentifier.last_confirmed_crossing_type
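A plausible reading of the crop_image call above is that each Settings value is a pixel margin removed from the corresponding edge of the frame. The sketch below illustrates that assumption only; the project's actual crop_image signature and semantics are not shown in this excerpt.

def crop_image_sketch(img, top, right, bottom, left):
    # Hypothetical sketch: cut the given number of pixels off each border.
    # With zero margins the image is returned unchanged.
    h, w = img.shape[:2]
    return img[top:h - bottom, left:w - right]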
def test_run_length_encoding_normal_array(self):
    data_in = numpy.array([1, 1, 3, 2, 3, 3, 3, 2, 2, 1, 4, 4])
    data_out = ImagePreprocessor.run_length_encoding(data_in)
    expected = numpy.array([[2, 1], [1, 3], [1, 2], [3, 3], [2, 2], [1, 1], [2, 4]])
    numpy.testing.assert_array_equal(data_out, expected)
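The expected value above encodes each run of equal values as a [length, value] pair. The following vectorized sketch reproduces exactly that behavior (including the empty-input case tested further below); it is an illustrative alternative, not the project's actual run_length_encoding.

import numpy

def run_length_encoding_sketch(row):
    # Hypothetical sketch: encode each run of equal values as [run_length, value],
    # matching the [[2, 1], [1, 3], ...] format expected by the test above.
    row = numpy.asarray(row)
    if row.size == 0:
        return numpy.array([])
    # Indices where the value changes mark the start of a new run
    change_idx = numpy.flatnonzero(row[1:] != row[:-1]) + 1
    starts = numpy.concatenate(([0], change_idx))
    lengths = numpy.diff(numpy.concatenate((starts, [row.size])))
    return numpy.column_stack((lengths, row[starts]))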
def test_cleanup_row_noise_invalid_suffix(self):
    data_in = numpy.array([[7, 1], [8, 0], [12, 1], [9, 0], [4, 1]])
    data_out = ImagePreprocessor.cleanup_row_noise(data_in, 6)
    expected = numpy.array([[7, 1], [8, 0], [12, 1], [13, 0]])
    numpy.testing.assert_array_equal(data_out, expected)
def test_cleanup_row_noise_double_invalid_middle(self):
    data_in = numpy.array([[7, 1], [2, 0], [3, 1], [9, 0], [12, 1]])
    data_out = ImagePreprocessor.cleanup_row_noise(data_in, 6)
    expected = numpy.array([[12, 1], [9, 0], [12, 1]])
    numpy.testing.assert_array_equal(data_out, expected)
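The two cleanup tests above suggest that cleanup_row_noise absorbs runs shorter than the given minimum length into the preceding run, keeping that run's value. The sketch below is consistent with both expected results but is only an assumption about the behavior; the real implementation may handle edge cases (such as a short leading run) differently.

import numpy

def cleanup_row_noise_sketch(rle_data, min_length=6):
    # Hypothetical sketch: merge runs shorter than min_length into the
    # preceding run, keeping the preceding run's value.
    cleaned = []
    for length, value in rle_data:
        if cleaned and length < min_length:
            cleaned[-1][0] += length
        else:
            cleaned.append([length, value])
    return numpy.array(cleaned)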
def test_run_length_encoding_empty(self):
    data_in = numpy.array([])
    data_out = ImagePreprocessor.run_length_encoding(data_in)
    expected = numpy.array([])
    numpy.testing.assert_array_equal(data_out, expected)