def test_detected_frame(self):
    """Test if the input frame's background is removed correctly."""
    # setup
    expected_path = utils.get_full_path(
        'docs/material_for_testing/back_ground_removed_frame.jpg')
    expected = cv2.imread(expected_path)
    test_path = utils.get_full_path(
        'docs/material_for_testing/face_and_hand_0.avi')
    cap = cv2.VideoCapture(test_path)
    flags_handler = FlagsHandler()
    back_ground_remover = BackGroundRemover(flags_handler)
    ret = True

    # run
    while ret is True:
        ret, frame = cap.read()
        if ret is True:
            back_ground_remover.detected_frame = frame
            # write_path = utils.get_full_path('docs')
            # cv2.imwrite(write_path + '/back_ground_removed_frame.jpg',
            #             back_ground_remover.detected_frame)
            ssim = ImageTestTool.compare_imaged(back_ground_remover.detected_frame,
                                                expected)
            # print("SSIM: {}".format(ssim))
            assert ssim >= 0.95

    # teardown
    cap.release()
    cv2.destroyAllWindows()
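# These tests score frames with ImageTestTool.compare_imaged, whose
# implementation is not shown here. The sketch below is a minimal stand-in,
# assuming the score is SSIM computed with scikit-image; the name
# compare_images_ssim and the grayscale conversion are illustrative
# assumptions, not the project's actual code.
import cv2
from skimage.metrics import structural_similarity


def compare_images_ssim(result, expected):
    """Return the SSIM score of two BGR images; 1.0 means a perfect match."""
    # Compare grayscale versions to stay compatible across scikit-image versions.
    result_gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
    expected_gray = cv2.cvtColor(expected, cv2.COLOR_BGR2GRAY)
    return structural_similarity(result_gray, expected_gray)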
def test_draw_contour(self):
    """Test if the contour is drawn according to flags_handler."""
    # setup
    # Input from camera.
    cv2.namedWindow('test_draw_contour')
    test_path = utils.get_full_path(
        'docs/material_for_testing/back_ground_removed_frame.jpg')
    test_image = cv2.imread(test_path)
    # Because the image is loaded from disk, and not received from a web-cam, a flip is needed.
    test_image = cv2.flip(test_image, 1)
    expected = test_image.copy()
    flags_handler = FlagsHandler()
    # Set flags_handler in order to perform the test.
    flags_handler.lifted = True
    flags_handler.calibrated = True
    detector = Detector(flags_handler)

    # run
    while flags_handler.quit_flag is False:
        """
        Inside the loop, self._threshold is updated according to flags_handler.
        Pressing 'c': toggles control (the contour's color should change between green and red).
        Pressing 'l': raises the 'land' flag in flags_handler, so the loop can be broken (with esc).
        Pressing esc: breaks the loop.
        """
        detector.input_frame_for_feature_extraction = test_image
        cv2.imshow('test_draw_contour', detector.input_frame_for_feature_extraction)
        flags_handler.keyboard_input = cv2.waitKey(1)

    # teardown
    cv2.destroyAllWindows()
def test_input_frame(self):
    """Test if the input frame is preprocessed correctly."""
    # setup
    test_path = utils.get_full_path(
        'docs/material_for_testing/face_and_hand.jpg')
    test_image = cv2.imread(test_path)
    # Because the image is loaded from disk, and not received from a web-cam, a flip is needed:
    # inside frame_handler a frame is expected to arrive from the web-cam,
    # hence it is flipped after being received.
    test_image = cv2.flip(test_image, 1)  # type: np.ndarray
    expected = test_image.copy()
    expected = cv2.bilateralFilter(expected, 5, 50, 100)  # smoothing filter
    expected = cv2.flip(expected, 1)
    frame_handler = FrameHandler()
    frame_handler.logger.setLevel(logging.DEBUG)

    # run
    # SSIM range is [-1, 1]; a value of 1 is a "perfect match".
    frame_handler.input_frame = test_image
    ssim = ImageTestTool.compare_imaged(frame_handler.input_frame, expected)
    # print("SSIM: {}".format(ssim))
    assert ssim >= 0.95
def __init__(self):
    self.logger = logging.getLogger('face_processor_handler')
    self.logger.setLevel(logging.INFO)
    self._face_detector = cv2.CascadeClassifier(
        utils.get_full_path('hallopy/config/haarcascade_frontalface_default.xml'))
    self._face_padding_x = 20
    self._face_padding_y = 60
    self._preprocessed_input_frame = None
def detect_faces(img):
    """Detect faces in an image.

    :returns faces: array with the detected faces' coordinates.
    """
    face_detector = cv2.CascadeClassifier(
        utils.get_full_path(
            'hallopy/config/haarcascade_frontalface_default.xml'))
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return face_detector.detectMultiScale(gray, 1.3, 5)
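# detect_faces returns (x, y, w, h) boxes, as produced by detectMultiScale.
# test_face_covered_frame (below) also relies on ImageTestTool.draw_black_recs,
# which is not shown here. A plausible minimal sketch, assuming it blacks out
# each detected box in place:
import cv2


def draw_black_recs(img, rects):
    """Cover each (x, y, w, h) detection with a filled black rectangle, in place."""
    for (x, y, w, h) in rects:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), thickness=cv2.FILLED)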
def test_extract_center_of_mass(self):
    """Test if the extractor finds the center of mass."""
    # setup
    test_path = utils.get_full_path('docs/material_for_testing/back_ground_removed_frame.jpg')
    test_image = cv2.imread(test_path)
    expected_path = utils.get_full_path(
        'docs/material_for_testing/back_ground_removed_and_center_of_mass_discovered.jpg')
    expected_image = cv2.imread(expected_path)
    # Because the image is loaded from disk, and not received from a web-cam, a flip is needed.
    test_image = cv2.flip(test_image, 1)

    # todo: use mockito here to mock detector
    flags_handler = FlagsHandler()
    detector = Detector(flags_handler)
    extractor = Extractor(flags_handler)
    detector.input_frame_for_feature_extraction = test_image

    # run
    extractor.extract = detector
    result_image = test_image.copy()
    cv2.circle(result_image, extractor.palm_center_point, 5, (255, 0, 0), thickness=5)
    ssim = ImageTestTool.compare_imaged(result_image, expected_image)
    # print("SSIM: {}".format(ssim))
    assert ssim >= 0.95
def test_find_largest_contours(self):
    """Test if the largest contour is found."""
    # setup
    test_path = utils.get_full_path(
        'docs/material_for_testing/back_ground_removed_frame.jpg')
    test_image = cv2.imread(test_path)
    # Because the image is loaded from disk, and not received from a web-cam, a flip is needed.
    test_image = cv2.flip(test_image, 1)
    test_image = cv2.bitwise_not(test_image)
    max_area_contour = ImageTestTool.get_max_area_contour(test_image)
    expected_area = ImageTestTool.get_contour_area(max_area_contour)
    # Create detector.
    flags_handler = FlagsHandler()
    detector = Detector(flags_handler)

    # run
    detector.input_frame_for_feature_extraction = test_image
    result_area = cv2.contourArea(detector.max_area_contour)

    assert result_area == expected_area
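# ImageTestTool.get_max_area_contour is likewise not shown. Below is a minimal
# sketch of the usual OpenCV recipe: threshold to a binary mask, find external
# contours, and keep the one with the largest area. The threshold value of 50
# is an assumption, not the project's actual parameter.
import cv2


def get_max_area_contour(img):
    """Return the largest-area contour of a BGR image, or None if none is found."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY)
    # OpenCV 4.x returns (contours, hierarchy); 3.x prepends the image.
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    return max(contours, key=cv2.contourArea) if contours else None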
def test_face_covered_frame(self):
    """Test if faces are detected and covered."""
    # setup
    test_path = utils.get_full_path(
        'docs/material_for_testing/face_and_hand.jpg')
    test_image = cv2.imread(test_path)
    expected = test_image.copy()
    expected_faces = ImageTestTool.detect_faces(expected)
    ImageTestTool.draw_black_recs(expected, expected_faces)

    face_processor = FaceProcessor()
    face_processor.logger.setLevel(logging.DEBUG)
    # Insert the image with a face; the setter should cover the detected faces.
    face_processor.face_covered_frame = test_image

    # run
    ssim = ImageTestTool.compare_imaged(face_processor.face_covered_frame, expected)
    # print("SSIM: {}".format(ssim))
    assert ssim >= 0.93
def test_contour_extreme_point_tracking(self):
    """Test tracking of extreme points without optical flow (i.e. until calibrated)."""
    # setup
    test_path = utils.get_full_path('docs/material_for_testing/back_ground_removed_frame.jpg')
    test_image = cv2.imread(test_path)
    # todo: use mockito here to mock preprocessing elements
    flags_handler = FlagsHandler()
    detector = Detector(flags_handler)
    extractor = Extractor(flags_handler)

    # Background model preparations.
    bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
    cap = cv2.VideoCapture(0)

    while flags_handler.quit_flag is False:
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)

        # Remove background from the input frame.
        fgmask = bg_model.apply(frame, learningRate=0)
        kernel = np.ones((3, 3), np.uint8)
        fgmask = cv2.erode(fgmask, kernel, iterations=1)
        res = cv2.bitwise_and(frame, frame, mask=fgmask)

        # Clip the frame's ROI.
        back_ground_removed_clipped = ImageTestTool.clip_roi(res,
                                                             {'cap_region_x_begin': 0.6,
                                                              'cap_region_y_end': 0.6})
        if flags_handler.background_capture_required is True:
            bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
            flags_handler.background_capture_required = False

        detector.input_frame_for_feature_extraction = back_ground_removed_clipped
        extractor.extract = detector

        image = extractor.get_drawn_extreme_contour_points()
        cv2.imshow('test_contour_extreme_point_tracking', image)
        flags_handler.keyboard_input = cv2.waitKey(1)
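# ImageTestTool.clip_roi is not shown either. Judging by the key names,
# cap_region_x_begin and cap_region_y_end are fractions of the frame's width
# and height that select the top-right capture region; the exact geometry
# below is an assumption based on those names, not the project's actual code.
def clip_roi(frame, config):
    """Clip the capture ROI (assumed: the top-right region of the frame)."""
    height, width = frame.shape[:2]
    x_begin = int(config['cap_region_x_begin'] * width)
    y_end = int(config['cap_region_y_end'] * height)
    return frame[0:y_end, x_begin:width]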
def test_draw_axes(self):
    """Test if detected_out_put_center is calculated properly."""
    # setup
    test_path = utils.get_full_path(
        'docs/material_for_testing/back_ground_removed_frame.jpg')
    test_image = cv2.imread(test_path)
    # Because the image is loaded from disk, and not received from a web-cam, a flip is needed.
    test_image = cv2.flip(test_image, 1)
    expected = test_image.copy()
    # Create detector.
    flags_handler = FlagsHandler()
    detector = Detector(flags_handler)
    expected_detected_out_put_center = (
        int(expected.shape[1] / 2), int(expected.shape[0] / 2) + detector.horiz_axe_offset)

    # run
    detector.input_frame_for_feature_extraction = test_image
    cv2.imshow('expected', expected)
    cv2.imshow('result', detector.input_frame_for_feature_extraction)
    cv2.waitKey()

    assert expected_detected_out_put_center == detector.detected_out_put_center
def test_get_contour_extreme_point(self):
    """Test if the middle finger's edge is found correctly."""
    # setup
    test_path = utils.get_full_path('docs/material_for_testing/back_ground_removed_frame.jpg')
    test_image = cv2.imread(test_path)
    max_area_contour = ImageTestTool.get_max_area_contour(test_image)
    expected_extLeft, expected_extRight, expected_extTop, expected_extBot = \
        ImageTestTool.get_contour_extreme_points(max_area_contour)

    # todo: use mockito here to mock detector
    flags_handler = FlagsHandler()
    detector = Detector(flags_handler)
    extractor = Extractor(flags_handler)
    detector.input_frame_for_feature_extraction = test_image

    # run
    extractor.extract = detector

    assert expected_extLeft == extractor.ext_left
    assert expected_extRight == extractor.ext_right
    assert expected_extTop == extractor.ext_top
    assert expected_extBot == extractor.ext_bot
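# ImageTestTool.get_contour_extreme_points presumably follows the standard
# OpenCV recipe for contour extreme points: argmin/argmax over the x and y
# coordinates of the (N, 1, 2) contour array. A sketch of that recipe,
# assuming (but not confirming) the project's helper matches it:
def get_contour_extreme_points(contour):
    """Return (left, right, top, bottom) extreme points of a contour as (x, y) tuples."""
    ext_left = tuple(contour[contour[:, :, 0].argmin()][0])
    ext_right = tuple(contour[contour[:, :, 0].argmax()][0])
    ext_top = tuple(contour[contour[:, :, 1].argmin()][0])
    ext_bot = tuple(contour[contour[:, :, 1].argmax()][0])
    return ext_left, ext_right, ext_top, ext_bot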