コード例 #1
0
ファイル: VidStab.py プロジェクト: nlb4vd/Video
    def __init__(self, kp_method='GFTT', *args, **kwargs):
        """Create a VidStab instance.

        :param kp_method: Name of the keypoint detector to use.  One of:
                        ["GFTT", "BRISK", "DENSE", "FAST", "HARRIS",
                         "MSER", "ORB", "SIFT", "SURF", "STAR"]
        :param args: Positional arguments forwarded to the keypoint detector.
        :param kwargs: Keyword arguments forwarded to the keypoint detector.
        """
        self.kp_method = kp_method

        # A bare 'GFTT' request (no extra args) uses the original defaults
        # from http://nghiaho.com/?p=2093 rather than OpenCV's own.
        use_gftt_defaults = kp_method == 'GFTT' and not args and not kwargs
        if use_gftt_defaults:
            self.kp_detector = kp_factory.FeatureDetector_create(
                'GFTT', maxCorners=200, qualityLevel=0.01,
                minDistance=30.0, blockSize=3)
        else:
            self.kp_detector = kp_factory.FeatureDetector_create(
                kp_method, *args, **kwargs)

        # Populated during stabilization
        self.trajectory = None
        self.smoothed_trajectory = None
        self.transforms = None
        self._raw_transforms = None
コード例 #2
0
    def __init__(self, kp_method='GFTT', processing_max_dim=float('inf'), *args, **kwargs):
        """Create a VidStab instance.

        :param kp_method: Name of the keypoint detector to use.  Options always
                        available: ``["GFTT", "BRISK", "DENSE", "FAST", "HARRIS",
                        "MSER", "ORB", "STAR"]``.  ``["SIFT", "SURF"]`` are
                        additional non-free options depending on your OpenCV
                        build; the non-free detectors are not tested with this
                        package.
        :param processing_max_dim: Cap on the largest frame dimension while
                                   processing; large frames can harm performance
                                   (especially in live video).  Output frames
                                   keep their original size.

                                   For example:
                                     * input frame shape `(200, 400, 3)` with
                                       `processing_max_dim` 100 -> processed at
                                       `(50, 100, 3)`
                                     * input frame shape `(400, 200, 3)` with
                                       `processing_max_dim` 100 -> processed at
                                       `(100, 50, 3)`
                                     * input frame shape `(50, 50, 3)` with
                                       `processing_max_dim` 100 -> unchanged

        :param args: Positional arguments forwarded to the keypoint detector.
        :param kwargs: Keyword arguments forwarded to the keypoint detector.
        """
        self.kp_method = kp_method

        # A bare 'GFTT' request (no extra args) uses the original defaults
        # from http://nghiaho.com/?p=2093 rather than OpenCV's own.
        if kp_method == 'GFTT' and not args and not kwargs:
            gftt_defaults = dict(maxCorners=200,
                                 qualityLevel=0.01,
                                 minDistance=30.0,
                                 blockSize=3)
            self.kp_detector = kp_factory.FeatureDetector_create('GFTT', **gftt_defaults)
        else:
            self.kp_detector = kp_factory.FeatureDetector_create(kp_method, *args, **kwargs)

        self.processing_max_dim = processing_max_dim
        self._processing_resize_kwargs = {}

        # Transform bookkeeping (filled in during stabilization)
        self._smoothing_window = 30
        self._raw_transforms = []
        self._trajectory = []
        self.trajectory = None
        self.smoothed_trajectory = None
        self.transforms = None

        # Per-frame tracking state
        self.frame_queue = FrameQueue()
        self.prev_kps = None
        self.prev_gray = None

        self.writer = None

        self.layer_options = {'layer_func': None, 'prev_frame': None}

        # Border handling configuration/state
        self.border_options = {}
        self.auto_border_flag = False
        self.extreme_frame_corners = {'min_x': 0, 'min_y': 0,
                                      'max_x': 0, 'max_y': 0}
        self.frame_corners = None

        self._default_stabilize_frame_output = None
コード例 #3
0
    def __init__(self, kp_method='GFTT', *args, **kwargs):
        """Create a VidStab instance.

        :param kp_method: Name of the keypoint detector to use.  Options always
                        available: ``["GFTT", "BRISK", "DENSE", "FAST", "HARRIS",
                        "MSER", "ORB", "STAR"]``.  ``["SIFT", "SURF"]`` are
                        additional non-free options depending on your OpenCV
                        build; the non-free detectors are not tested with this
                        package.
        :param args: Positional arguments forwarded to the keypoint detector.
        :param kwargs: Keyword arguments forwarded to the keypoint detector.
        """
        self.kp_method = kp_method

        # A bare 'GFTT' request (no extra args) uses the original defaults
        # from http://nghiaho.com/?p=2093 rather than OpenCV's own.
        if kp_method == 'GFTT' and not args and not kwargs:
            gftt_defaults = dict(maxCorners=200,
                                 qualityLevel=0.01,
                                 minDistance=30.0,
                                 blockSize=3)
            self.kp_detector = kp_factory.FeatureDetector_create('GFTT', **gftt_defaults)
        else:
            self.kp_detector = kp_factory.FeatureDetector_create(kp_method, *args, **kwargs)

        # Transform bookkeeping (filled in during stabilization)
        self._smoothing_window = 30
        self._raw_transforms = []
        self._trajectory = []
        self.trajectory = None
        self.smoothed_trajectory = None
        self.transforms = None

        # Per-frame tracking state
        self.frame_queue = FrameQueue()
        self.prev_kps = None
        self.prev_gray = None

        self.writer = None

        self.layer_options = {'layer_func': None, 'prev_frame': None}

        # Border handling configuration/state
        self.border_options = {}
        self.auto_border_flag = False
        self.extreme_frame_corners = {'min_x': 0, 'min_y': 0,
                                      'max_x': 0, 'max_y': 0}
        self.frame_corners = None

        self._default_stabilize_frame_output = None
コード例 #4
0
    def __init__(self,
                 kp_method='GFTT',
                 processing_max_dim=float('inf'),
                 *args,
                 **kwargs):
        """Create a VidStab instance.

        :param kp_method: Name of the keypoint detector to use (e.g. 'GFTT').
        :param processing_max_dim: Cap on the largest frame dimension while
                                   processing; output frames keep their
                                   original size.
        :param args: Positional arguments forwarded to the keypoint detector.
        :param kwargs: Keyword arguments forwarded to the keypoint detector.
        """
        self.kp_method = kp_method
        # A bare 'GFTT' request (no extra args) uses hard-coded defaults
        # rather than OpenCV's own.
        if kp_method == 'GFTT' and not args and not kwargs:
            gftt_defaults = dict(maxCorners=200,
                                 qualityLevel=0.01,
                                 minDistance=30.0,
                                 blockSize=3)
            self.kp_detector = kp_factory.FeatureDetector_create('GFTT', **gftt_defaults)
        else:
            self.kp_detector = kp_factory.FeatureDetector_create(kp_method, *args, **kwargs)

        self.processing_max_dim = processing_max_dim
        self._processing_resize_kwargs = {}

        # Transform bookkeeping (filled in during stabilization)
        self._smoothing_window = 30
        self._raw_transforms = []
        self._trajectory = []
        self.trajectory = None
        self.smoothed_trajectory = None
        self.transforms = None

        # Per-frame tracking state
        self.frame_queue = FrameQueue()
        self.prev_kps = None
        self.prev_gray = None

        self.writer = None

        self.layer_options = {'layer_func': None, 'prev_frame': None}

        # Border handling configuration/state
        self.border_options = {}
        self.auto_border_flag = False
        self.extreme_frame_corners = {'min_x': 0, 'min_y': 0,
                                      'max_x': 0, 'max_y': 0}
        self.frame_corners = None

        self._default_stabilize_frame_output = None
コード例 #5
0
    def __init__(self, query_image, ar_image, min_match_count=10):
        """Precompute SIFT features and a FLANN matcher for the query image.

        :param query_image: BGR image to be located in scene frames.
        :param ar_image: Image to overlay onto the detected query region.
        :param min_match_count: Minimum number of matches required.
        """
        self.query_image = query_image
        self.ar_image = ar_image
        self.min_match_count = min_match_count

        # Corner coordinates of both images
        self.query_corners = self.__get_image_corners(self.query_image)
        self.ar_corners = self.__get_image_corners(self.ar_image)

        # Detect SIFT keypoints/descriptors on the query image once, up front
        self.query_gray = cv2.cvtColor(self.query_image, cv2.COLOR_BGR2GRAY)
        self.sift = kp_factory.FeatureDetector_create('SIFT')
        self.query_kps, self.query_kp_desc = self.sift.detectAndCompute(self.query_gray, None)

        # FLANN matcher: algorithm 0 with 5 trees, 50 checks per query
        index_params = {'algorithm': 0, 'trees': 5}
        search_params = {'checks': 50}
        self.flann = cv2.FlannBasedMatcher(index_params, search_params)
コード例 #6
0
import pytest
import numpy as np
import cv2
import imutils.feature.factories as kp_factory

from vidstab.frame import Frame
import vidstab.vidstab_utils as utils

# Shared fixtures: a GFTT detector and two synthetic frames containing the
# same filled rectangle shifted by (+30, +30), giving known motion to track.
kp_detector = kp_factory.FeatureDetector_create('GFTT')
frame_1 = np.zeros((200, 200, 3), dtype='uint8')
frame_2 = np.zeros((200, 200, 3), dtype='uint8')

# Draw the filled blue rectangle at different positions in each frame
cv2.rectangle(frame_1, (20, 50), (100, 100), (255, 0, 0), -1)
cv2.rectangle(frame_2, (50, 80), (130, 130), (255, 0, 0), -1)

frame_1_gray = cv2.cvtColor(frame_1, cv2.COLOR_BGR2GRAY)
frame_2_gray = cv2.cvtColor(frame_2, cv2.COLOR_BGR2GRAY)
frame_1_kps = kp_detector.detect(frame_1_gray)
# Reshape keypoint coordinates to the (N, 1, 2) float32 layout expected by
# cv2.calcOpticalFlowPyrLK
# noinspection PyArgumentList
frame_1_kps = np.array([kp.pt for kp in frame_1_kps], dtype='float32').reshape(-1, 1, 2)

# Track the frame-1 keypoints into frame 2 with Lucas-Kanade optical flow
optical_flow = cv2.calcOpticalFlowPyrLK(frame_1_gray,
                                        frame_2_gray,
                                        frame_1_kps,
                                        None)

def test_build_transformation_matrix():
    """Check the 2x3 affine matrix built from the transform [1, 2, -10]."""
    # presumably [dx, dy, da] with da in radians — TODO confirm:
    # cos(-10) ~= -0.83907153, sin(-10) ~= 0.54402111
    expected = np.array([[-0.83907153, -0.54402111, 1.0],
                         [0.54402111, -0.83907153, 2.0]])
    actual = utils.build_transformation_matrix([1, 2, -10])
    assert np.allclose(actual, expected)
コード例 #7
0
ファイル: index_features.py プロジェクト: karanjsingh/CBIR
                type=int,
                default=500,
                help="Approximate # of images in the dataset")
ap.add_argument(
    "-b",
    "--maxbuffersize",
    type=int,
    default=50000,
    help=
    "Maximum buffer size for number of features of images stored in the memory"
)
args = vars(ap.parse_args())

# initialise the keypoint detector, local invariant descriptor,
# and the detect-and-describe pipeline
detector = factories.FeatureDetector_create("SURF")
descriptor = factories.DescriptorExtractor_create("RootSIFT")
dad = detectanddescribe.DetectAndDescribe(detector, descriptor)

# initialize the feature indexer that writes features to the database file
fi = featureindexer.FeatureIndexer(args["featuresdb"],
                                   estNumImages=args["approximages"],
                                   maxBufferSize=args["maxbuffersize"],
                                   verbose=True)

# loop over the images in the dataset
for (i, imagePath) in enumerate(list(paths.list_images(args["dataset"]))):
    # check to see if progress should be displayed (every 10 images)
    if i > 0 and i % 10 == 0:
        fi._debug("processed {} images".format(i), msgType="[PROGRESS]")
コード例 #8
0
import cv2
import numpy as np
import imutils.feature.factories as kp_factory

# Module-level keypoint pipeline: GFTT detector, BRIEF descriptor, and a
# brute-force matcher, shared by the functions below
DETECTOR = kp_factory.FeatureDetector_create('GFTT')
DESCRIPTOR = kp_factory.DescriptorExtractor_create('BRIEF')
MATCHER = kp_factory.DescriptorMatcher_create('BruteForce')


def detect_and_describe(image):
    """Detect keypoints in ``image`` and compute their BRIEF descriptors.

    :param image: Input image (converted with COLOR_BGRA2GRAY for detection;
                  NOTE(review): presumably frames carry an alpha channel —
                  confirm against the capture source).
    :return: Tuple ``(kps, features)`` where ``kps`` is a float32 array of
             keypoint (x, y) coordinates.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGRA2GRAY)

    keypoints = DETECTOR.detect(gray)
    keypoints, features = DESCRIPTOR.compute(image, keypoints)

    coords = np.float32([keypoint.pt for keypoint in keypoints])
    return coords, features


def match_keypoints(kps_a,
                    kps_b,
                    features_a,
                    features_b,
                    ratio=0.75,
                    reproj_thresh=4.0):
    raw_matches = MATCHER.knnMatch(features_a, features_b, 2)

    matches = []
    for m in raw_matches:
        # Lowe's ratio test to include match