def test_affine_to_np_uses_the_first_array_of_affine_and_gets_the_first_two_rows_of_affine_opencv4(self):
    """For OpenCV 4 the affine tuple's first array supplies both output rows."""
    OpencvDetectorInterface.OPENCV_MAJOR = "4"
    interface = OpencvDetectorInterface()
    # OpenCV >= 3 returns (matrix, inliers); only the matrix should be used.
    affine = (np.array([(1, 2, 3), (4, 5, 6)]),
              np.array([[7], [8], [9], [10], [11], [12]]))
    result = interface.affine_to_np_array(affine)
    self.assertIn(affine[0][0, :], result[0])
    self.assertIn(affine[0][1, :], result[1])
def test_ORB_is_not_called_for_ORB_detector_in_opencv3(self, cv2_mock):
    """The ORB constructor path is bypassed when the major version is the int 3."""
    # NOTE(review): integer 3, unlike the string majors used in sibling tests —
    # presumably deliberate so a string comparison against "3" fails; confirm.
    OpencvDetectorInterface.OPENCV_MAJOR = 3
    adaptation = Mock()
    interface = OpencvDetectorInterface()
    interface.feature_detector_create('ORB', adaptation)
    cv2_mock.assert_called_once_with(adaptation)
def test_affine_to_np_uses_the_firts_two_rows_of_affine_for_opencv2(self):
    """For OpenCV 2 the two rows of the affine list map straight to the output."""
    OpencvDetectorInterface.OPENCV_MAJOR = "2"
    interface = OpencvDetectorInterface()
    # This is the affine array format used in OpenCV 2.
    affine = [np.array([1, 2, 3]), np.array([4, 5, 6])]
    result = interface.affine_to_np_array(affine)
    self.assertIn(affine[0], result[0])
    self.assertIn(affine[1], result[1])
def test_SimpleBlobDetector_create_called_for_SimpleBlob_in_opencv3(self, cv2_mock):
    """SimpleBlob under OpenCV 3 is created with no arguments."""
    OpencvDetectorInterface.OPENCV_MAJOR = "3"
    adaptation = Mock()
    interface = OpencvDetectorInterface()
    interface.feature_detector_create('SimpleBlob', adaptation)
    # The adaptation is ignored for the SimpleBlob factory.
    cv2_mock.assert_called_once_with()
def test_estimate_rigid_transform_triggers_estimateRigidTransform_for_opencv2(self, cv2_mock):
    """OpenCV 2 delegates to estimateRigidTransform with the fullAffine flag."""
    OpencvDetectorInterface.OPENCV_MAJOR = "2"
    interface = OpencvDetectorInterface()
    first_image, second_image = Mock(), Mock()
    interface.estimate_rigid_transform(first_image, second_image, True)
    cv2_mock.assert_called_once_with(first_image, second_image, fullAffine=True)
def test_estimate_rigid_transform_triggers_estimateAffinePartial2D_for_opencv4_when_use_full_false(self, cv2_mock):
    """OpenCV 4 with use_full=False delegates to estimateAffinePartial2D."""
    OpencvDetectorInterface.OPENCV_MAJOR = "4"
    interface = OpencvDetectorInterface()
    first_image, second_image = Mock(), Mock()
    interface.estimate_rigid_transform(first_image, second_image, False)
    # The partial-affine call takes no extra flag.
    cv2_mock.assert_called_once_with(first_image, second_image)
def test_BriefDescriptorExtractor_create_called_for_opencv3_if_detector_on_LIST_WITHOUT_EXTRACTORS(self, cv2_mock):
    """Detectors without their own extractor fall back to BriefDescriptorExtractor."""
    OpencvDetectorInterface.OPENCV_MAJOR = "3"
    interface = OpencvDetectorInterface()
    interface.create_extractor(Mock(), 'FAST')
    cv2_mock.BriefDescriptorExtractor_create.assert_called_once_with()
    self.assertIsNotNone(cv2_mock.BriefDescriptorExtractor_create)
def test_BRISK_called_for_other_in_opencv3(self, cv2_mock):
    """BRISK under OpenCV 3 is created with the adaptation argument."""
    OpencvDetectorInterface.OPENCV_MAJOR = "3"
    adaptation = Mock()
    interface = OpencvDetectorInterface()
    interface.feature_detector_create('BRISK', adaptation)
    cv2_mock.assert_called_once_with(adaptation)
def test_HarrisLaplaceFeatureDetector_create_called_for_HARRIS_in_opencv3(self, cv2_mock):
    """HARRIS under OpenCV 3 uses the HarrisLaplaceFeatureDetector factory."""
    OpencvDetectorInterface.OPENCV_MAJOR = "3"
    adaptation = Mock()
    interface = OpencvDetectorInterface()
    interface.feature_detector_create('HARRIS', adaptation)
    # The factory is called with no arguments; the adaptation is unused.
    cv2_mock.HarrisLaplaceFeatureDetector_create.assert_called_once_with()
def test_FeatureDetector_create_is_not_called_for_opencv3(self, cv2_mock):
    """The legacy FeatureDetector_create path is skipped under OpenCV 3."""
    OpencvDetectorInterface.OPENCV_MAJOR = "3"
    interface = OpencvDetectorInterface()
    interface.feature_detector_create('ORB', 10)
    cv2_mock.assert_not_called()
def test_FeatureDetector_create_is_called_for_opencv2(self, cv2_mock):
    """OpenCV 2 builds the detector name as adaptation + detector name."""
    OpencvDetectorInterface.OPENCV_MAJOR = "2"
    detector_name = 'ORB'
    adaptation = ''
    interface = OpencvDetectorInterface()
    interface.feature_detector_create(detector_name, adaptation)
    cv2_mock.assert_called_once_with(adaptation + detector_name)
def test_compute_always_calls_compute_on_extractor_for_opecv2(self):
    """Under OpenCV 2 compute() always delegates to the extractor."""
    OpencvDetectorInterface.OPENCV_MAJOR = "2"
    image, keypoints = Mock(), Mock()
    extractor = Mock()
    extractor.compute = Mock(return_value=([], []))
    interface = OpencvDetectorInterface()
    interface.compute(image, keypoints, extractor, Mock())
    extractor.compute.assert_called_once_with(image, keypoints)
def test_opecv3_when_extractor_is_none_than_compute_called_on_detector(self):
    """Under OpenCV 3, a None extractor means the detector computes descriptors."""
    OpencvDetectorInterface.OPENCV_MAJOR = "3"
    image, keypoints = Mock(), Mock()
    detector = Mock()
    detector.compute = Mock(return_value=([], []))
    interface = OpencvDetectorInterface()
    interface.compute(image, keypoints, None, detector)
    detector.compute.assert_called_once_with(image, keypoints)
def _create_default_extractor(extractor, detector_name):
    """Create an extractor via the OpenCV interface for the given detector.

    Note: BRISK uses 64 integers, all others are arrays of 32 ints
    (in range 0 to 255).
    """
    interface = OpencvDetectorInterface()
    return interface.create_extractor(extractor, detector_name)
def _create_detector(self):
    """Build a BRISK detector from this instance's configured parameters."""
    make_brisk = OpencvDetectorInterface().brisk_constructor()
    return make_brisk(thresh=self._thresh,
                      octaves=self._octaves,
                      patternScale=self._pattern_scale)
def _calculate_affine_transform(self, matches):
    """Estimate an affine transform from pre-filtered matches.

    Returns a (transform, mask) pair; transform is None when there are
    too few matches or estimation fails.

    Note: internally, estimateRigidTransform uses some sort of RANSAC
    method as a filter, but with hardcoded (and not very good) parameters.
    """
    transform = None
    matches, mask = self._pre_filter(matches)
    if self._has_enough_matches_for_transform(matches):
        points_a, points_b = self._get_np_points(matches)
        wants_full = self._method == self.AFFINE_FULL
        affine = OpencvDetectorInterface().estimate_rigid_transform(
            points_a, points_b, wants_full)
        if affine is not None:
            as_array = OpencvDetectorInterface().affine_to_np_array(affine)
            transform = AffineTransformation(as_array)
    return transform, mask
def _create_detector(self):
    """Build an ORB detector from this instance's configured parameters."""
    make_orb = OpencvDetectorInterface().orb_constructor()
    return make_orb(nfeatures=self._n_features,
                    scaleFactor=self._scale_factor,
                    nlevels=self._n_levels,
                    edgeThreshold=self._edge_threshold,
                    firstLevel=self._first_level,
                    WTA_K=self._wta_k,
                    scoreType=self._score(),
                    patchSize=self._patch_size)
def detect_features(self, image):
    """Detect interesting features in the image and generate descriptors.

    A keypoint identifies the location and orientation of a feature, and
    a descriptor is a vector of numbers that describe the various
    attributes of the feature. By generating descriptors, we can compare
    the set of features on two images and find matches between them.
    """
    detector = self._create_detector()
    keypoints = detector.detect(image.raw(), None)
    # TODO(review): the extractor object is built even when compute()
    # may not use it.
    extractor = self._create_extractor()
    keypoints, descriptors = OpencvDetectorInterface().compute(
        image.raw(), keypoints, extractor, detector)
    if descriptors is None:
        return []
    return [Feature(kp, descriptor)
            for kp, descriptor in zip(keypoints, descriptors)]
def test_for_opencv3_create_extractor_returns_none_if_detector_not_on_LIST_WITHOUT_EXTRACTORS(self):
    """Detectors with built-in extractors yield None from create_extractor."""
    OpencvDetectorInterface.OPENCV_MAJOR = "3"
    interface = OpencvDetectorInterface()
    result = interface.create_extractor(Mock(), 'ORB')
    self.assertIsNone(result)
def test_brisk_constructor_returns_BRISK_for_opencv2(self, cv2_mock):
    """brisk_constructor hands back the patched BRISK factory under OpenCV 2."""
    OpencvDetectorInterface.OPENCV_MAJOR = "2"
    interface = OpencvDetectorInterface()
    self.assertEqual(interface.brisk_constructor(), cv2_mock)
def _create_default_detector(detector, adaptation):
    """ Create a detector of the specified type with all the default parameters"""
    interface = OpencvDetectorInterface()
    return interface.feature_detector_create(detector, adaptation)
def _default_normalization():
    """ Keypoint normalization type for the detector method; used for matching. """
    interface = OpencvDetectorInterface()
    return interface.get_hamming_norm()
def test_brisk_constructor_returns_ORB_create_for_opencv3(self, cv2_mock):
    """orb_constructor hands back the patched ORB factory under OpenCV 3."""
    # NOTE(review): the test name says "brisk" but the body exercises
    # orb_constructor — confirm the intended target.
    OpencvDetectorInterface.OPENCV_MAJOR = "3"
    interface = OpencvDetectorInterface()
    self.assertEqual(interface.orb_constructor(), cv2_mock)
def test_DescriptorExtractor_create_called_for_opencv2(self, cv2_mock):
    """Under OpenCV 2 create_extractor delegates to DescriptorExtractor_create."""
    OpencvDetectorInterface.OPENCV_MAJOR = "2"
    extractor = Mock()
    interface = OpencvDetectorInterface()
    interface.create_extractor(extractor, 'ORB')
    cv2_mock.assert_called_once_with(extractor)