Example No. 1
class feature_homography_test(NewOpenCVTests):

    render = None
    tracker = None
    framesCounter = 0
    frame = None

    def test_feature_homography(self):

        self.render = TestSceneRender(self.get_sample('samples/data/graf1.png'),
            self.get_sample('samples/data/box.png'), noise = 0.5, speed = 0.5)
        self.frame = self.render.getNextFrame()
        self.tracker = PlaneTracker()
        self.tracker.clear()
        self.tracker.add_target(self.frame, self.render.getCurrentRect())

        while self.framesCounter < 100:
            self.framesCounter += 1
            tracked = self.tracker.track(self.frame)
            if len(tracked) > 0:
                tracked = tracked[0]
                self.assertGreater(intersectionRate(self.render.getCurrentRect(), np.int32(tracked.quad)), 0.6)
            else:
                self.fail('Tracking error')
            self.frame = self.render.getNextFrame()
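The test above relies on an intersectionRate helper (defined elsewhere in the OpenCV test suite, not shown in the snippet) to score the overlap between the ground-truth rectangle and the tracked quadrilateral. The code below is a rough, illustrative sketch of such a rect-versus-quad overlap measure, not the exact OpenCV helper; it assumes the first argument is an (xmin, ymin, xmax, ymax) rectangle and the second a 4x2 array of corners:

import numpy as np
import cv2

def intersectionRate(rect, quad):
    # Turn the (xmin, ymin, xmax, ymax) rectangle into a 4-point contour.
    x0, y0, x1, y1 = rect
    poly = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])
    quad = np.float32(quad).reshape(-1, 2)
    # Intersection area normalized by the mean area of the two shapes,
    # so a perfect overlap gives 1.0.
    area, _ = cv2.intersectConvexConvex(poly, quad)
    return 2 * area / (cv2.contourArea(poly) + cv2.contourArea(quad))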
Example No. 2
class feature_homography_test(NewOpenCVTests):

    render = None
    tracker = None
    framesCounter = 0
    frame = None

    def test_feature_homography(self):

        self.render = TestSceneRender(
            self.get_sample('samples/data/graf1.png'),
            self.get_sample('samples/data/box.png'),
            noise=0.5,
            speed=0.5)
        self.frame = self.render.getNextFrame()
        self.tracker = PlaneTracker()
        self.tracker.clear()
        self.tracker.add_target(self.frame, self.render.getCurrentRect())

        while self.framesCounter < 100:
            self.framesCounter += 1
            tracked = self.tracker.track(self.frame)
            if len(tracked) > 0:
                tracked = tracked[0]
                self.assertGreater(
                    intersectionRate(self.render.getCurrentRect(),
                                     np.int32(tracked.quad)), 0.6)
            else:
                self.fail('Tracking error')
            self.frame = self.render.getNextFrame()
Example No. 3
    def test_lk_homography(self):
        self.render = TestSceneRender(self.get_sample('samples/python2/data/graf1.png'),
            self.get_sample('samples/c/box.png'), noise = 0.1, speed = 1.0)

        frame = self.render.getNextFrame()
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        self.frame0 = frame.copy()
        self.p0 = cv2.goodFeaturesToTrack(frame_gray, **feature_params)

        isForegroundHomographyFound = False

        if self.p0 is not None:
            self.p1 = self.p0
            self.gray0 = frame_gray
            self.gray1 = frame_gray
            currRect = self.render.getCurrentRect()
            for (x,y) in self.p0[:,0]:
                if isPointInRect((x,y), currRect):
                    self.numFeaturesInRectOnStart += 1

        while self.framesCounter < 200:
            self.framesCounter += 1
            frame = self.render.getNextFrame()
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            if self.p0 is not None:
                p2, trace_status = checkedTrace(self.gray1, frame_gray, self.p1)

                self.p1 = p2[trace_status].copy()
                self.p0 = self.p0[trace_status].copy()
                self.gray1 = frame_gray

                if len(self.p0) < 4:
                    self.p0 = None
                    continue
                H, status = cv2.findHomography(self.p0, self.p1, cv2.RANSAC, 5.0)

                goodPointsInRect = 0
                goodPointsOutsideRect = 0
                for (x0, y0), (x1, y1), good in zip(self.p0[:,0], self.p1[:,0], status[:,0]):
                    if good:
                        if isPointInRect((x1,y1), self.render.getCurrentRect()):
                            goodPointsInRect += 1
                        else:
                            goodPointsOutsideRect += 1

                if goodPointsOutsideRect < goodPointsInRect:
                    isForegroundHomographyFound = True
                    self.assertGreater(float(goodPointsInRect) / (self.numFeaturesInRectOnStart + 1), 0.6)
            else:
                p = cv2.goodFeaturesToTrack(frame_gray, **feature_params)

        self.assertTrue(isForegroundHomographyFound)
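This test depends on several names defined elsewhere in the test file and not shown here: feature_params (keyword arguments for cv2.goodFeaturesToTrack), the class attribute numFeaturesInRectOnStart (a counter starting at 0), isPointInRect, and checkedTrace. The sketch below, modeled on OpenCV's lk_homography.py sample, is an assumption about how those helpers might look rather than the verbatim test code, and the parameter values in particular may differ:

import cv2

lk_params = dict(winSize=(19, 19), maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

def isPointInRect(p, rect):
    # rect is (xmin, ymin, xmax, ymax)
    return rect[0] <= p[0] <= rect[2] and rect[1] <= p[1] <= rect[3]

def checkedTrace(img0, img1, p0, back_threshold=1.0):
    # Track the points forward and then backward; keep only points whose
    # backward-tracked position lands close to where they started
    # (forward-backward consistency check).
    p1, _st, _err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
    p0r, _st, _err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
    d = abs(p0 - p0r).reshape(-1, 2).max(-1)
    status = d < back_threshold
    return p1, status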
Example No. 4
class Cube(VideoSynthBase):
    def __init__(self, **kw):
        super(Cube, self).__init__(**kw)
        self.render = TestSceneRender(cv.imread('../data/pca_test1.jpg'), deformation = True,  speed = 1)

    def read(self, dst=None):
        noise = np.zeros(self.render.sceneBg.shape, np.int8)
        cv.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)

        return True, cv.add(self.render.getNextFrame(), noise, dtype=cv.CV_8UC3)
Example No. 5
class Cube(VideoSynthBase):
    def __init__(self, **kw):
        super(Cube, self).__init__(**kw)
        self.render = TestSceneRender(cv2.imread('../data/pca_test1.jpg'), deformation = True,  speed = 1)

    def read(self, dst=None):
        noise = np.zeros(self.render.sceneBg.shape, np.int8)
        cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)

        return True, cv2.add(self.render.getNextFrame(), noise, dtype=cv2.CV_8UC3)
Example No. 6
class Book(VideoSynthBase):
    def __init__(self, **kw):
        super(Book, self).__init__(**kw)
        backGr = cv.imread('../data/graf1.png')
        fgr = cv.imread('../data/box.png')
        self.render = TestSceneRender(backGr, fgr, speed = 1)

    def read(self, dst=None):
        noise = np.zeros(self.render.sceneBg.shape, np.int8)
        cv.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)

        return True, cv.add(self.render.getNextFrame(), noise, dtype=cv.CV_8UC3)
Example No. 7
class Book(VideoSynthBase):
    def __init__(self, **kw):
        super(Book, self).__init__(**kw)
        backGr = cv2.imread('../data/graf1.png')
        fgr = cv2.imread('../data/box.png')
        self.render = TestSceneRender(backGr, fgr, speed = 1)

    def read(self, dst=None):
        noise = np.zeros(self.render.sceneBg.shape, np.int8)
        cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)

        return True, cv2.add(self.render.getNextFrame(), noise, dtype=cv2.CV_8UC3)
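Cube and Book both extend VideoSynthBase from OpenCV's video.py sample module, which stores self.noise from the constructor keywords and gives the objects a cv2.VideoCapture-like read() interface. Assuming the surrounding video.py module (with VideoSynthBase and TestSceneRender) is available and that the base class behaves this way, a minimal usage sketch might be:

import cv2

cap = Book(noise=0.1)          # synthetic video source instead of a camera
while True:
    _ret, frame = cap.read()   # returns (True, next rendered frame)
    cv2.imshow('synthetic book scene', frame)
    if cv2.waitKey(30) == 27:  # Esc quits
        break
cv2.destroyAllWindows()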
Example No. 8
 def __init__(self, **kw):
     super(Book, self).__init__(**kw)
     backGr = cv.imread(cv.samples.findFile('graf1.png'))
     fgr = cv.imread(cv.samples.findFile('box.png'))
     self.render = TestSceneRender(backGr, fgr, speed = 1)
Example No. 9
 def __init__(self, **kw):
     super(Book, self).__init__(**kw)
     backGr = cv.imread(cv.samples.findFile('graf1.png'))
     fgr = cv.imread(cv.samples.findFile('box.png'))
     self.render = TestSceneRender(backGr, fgr, speed=1)
Example No. 10
 def __init__(self, **kw):
     super(Cube, self).__init__(**kw)
     self.render = TestSceneRender(cv.imread(
         cv.samples.findFile('pca_test1.jpg')),
                                   deformation=True,
                                   speed=1)
Example No. 11
 def __init__(self, **kw):
     super(Cube, self).__init__(**kw)
     self.render = TestSceneRender(cv.imread('../data/pca_test1.jpg'), deformation = True,  speed = 1)
Example No. 12
class camshift_test(NewOpenCVTests):

    framesNum = 300
    frame = None
    selection = None
    drag_start = None
    show_backproj = False
    track_window = None
    render = None
    errors = 0

    def prepareRender(self):

        self.render = TestSceneRender(self.get_sample('samples/data/pca_test1.jpg'), deformation = True)

    def runTracker(self):

        framesCounter = 0
        self.selection = True

        xmin, ymin, xmax, ymax = self.render.getCurrentRect()

        self.track_window = (xmin, ymin, xmax - xmin, ymax - ymin)

        while True:
            framesCounter += 1
            self.frame = self.render.getNextFrame()
            hsv = cv.cvtColor(self.frame, cv.COLOR_BGR2HSV)
            mask = cv.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))

            if self.selection:
                # grow the current object rect by 50 px on every side to form the histogram ROI
                x0, y0, x1, y1 = self.render.getCurrentRect() + 50
                x0 -= 100
                y0 -= 100

                hsv_roi = hsv[y0:y1, x0:x1]
                mask_roi = mask[y0:y1, x0:x1]
                hist = cv.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] )
                cv.normalize(hist, hist, 0, 255, cv.NORM_MINMAX)
                self.hist = hist.reshape(-1)
                self.selection = False

            if self.track_window and self.track_window[2] > 0 and self.track_window[3] > 0:
                self.selection = None
                prob = cv.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
                prob &= mask
                term_crit = ( cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1 )
                _track_box, self.track_window = cv.CamShift(prob, self.track_window, term_crit)

            # convert the (x, y, w, h) track window into (xmin, ymin, xmax, ymax)
            trackingRect = np.array(self.track_window)
            trackingRect[2] += trackingRect[0]
            trackingRect[3] += trackingRect[1]

            if intersectionRate(self.render.getCurrentRect(), trackingRect) < 0.4:
                self.errors += 1

            if framesCounter > self.framesNum:
                break

        self.assertLess(float(self.errors) / self.framesNum, 0.4)

    def test_camshift(self):
        self.prepareRender()
        self.runTracker()
Example No. 13
    def test_lk_track(self):

        self.render = TestSceneRender(self.get_sample('samples/python2/data/graf1.png'), self.get_sample('samples/c/box.png'))
        self.runTracker()
Example No. 14
    def prepareRender(self):

        self.render = TestSceneRender(self.get_sample('samples/data/pca_test1.jpg'), deformation = True)
Example No. 15
class lk_track_test(NewOpenCVTests):

    track_len = 10
    detect_interval = 5
    tracks = []
    frame_idx = 0
    render = None

    def test_lk_track(self):

        self.render = TestSceneRender(
            self.get_sample('samples/data/graf1.png'),
            self.get_sample('samples/data/box.png'))
        self.runTracker()

    def runTracker(self):
        foregroundPointsNum = 0

        while True:
            frame = self.render.getNextFrame()
            frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

            if len(self.tracks) > 0:
                img0, img1 = self.prev_gray, frame_gray
                p0 = np.float32([tr[-1][0]
                                 for tr in self.tracks]).reshape(-1, 1, 2)
                p1, _st, _err = cv.calcOpticalFlowPyrLK(
                    img0, img1, p0, None, **lk_params)
                p0r, _st, _err = cv.calcOpticalFlowPyrLK(
                    img1, img0, p1, None, **lk_params)
                d = abs(p0 - p0r).reshape(-1, 2).max(-1)
                good = d < 1
                new_tracks = []
                for tr, (x, y), good_flag in zip(self.tracks,
                                                 p1.reshape(-1, 2), good):
                    if not good_flag:
                        continue
                    tr.append([(x, y), self.frame_idx])
                    if len(tr) > self.track_len:
                        del tr[0]
                    new_tracks.append(tr)
                self.tracks = new_tracks

            if self.frame_idx % self.detect_interval == 0:
                goodTracksCount = 0
                for tr in self.tracks:
                    oldRect = self.render.getRectInTime(self.render.timeStep *
                                                        tr[0][1])
                    newRect = self.render.getRectInTime(self.render.timeStep *
                                                        tr[-1][1])
                    if isPointInRect(tr[0][0], oldRect) and isPointInRect(
                            tr[-1][0], newRect):
                        goodTracksCount += 1

                if self.frame_idx == self.detect_interval:
                    foregroundPointsNum = goodTracksCount

                fgIndex = float(foregroundPointsNum) / (foregroundPointsNum +
                                                        1)
                fgRate = float(goodTracksCount) / (len(self.tracks) + 1)

                if self.frame_idx > 0:
                    self.assertGreater(fgIndex, 0.9)
                    self.assertGreater(fgRate, 0.2)

                mask = np.zeros_like(frame_gray)
                mask[:] = 255
                for x, y in [np.int32(tr[-1][0]) for tr in self.tracks]:
                    cv.circle(mask, (x, y), 5, 0, -1)
                p = cv.goodFeaturesToTrack(frame_gray,
                                           mask=mask,
                                           **feature_params)
                if p is not None:
                    for x, y in np.float32(p).reshape(-1, 2):
                        self.tracks.append([[(x, y), self.frame_idx]])

            self.frame_idx += 1
            self.prev_gray = frame_gray

            if self.frame_idx > 300:
                break
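lk_track_test uses two module-level dictionaries, lk_params and feature_params, that are not included in the snippet. The values below follow OpenCV's lk_track.py sample and are given only as a plausible configuration; the exact numbers in the test may differ:

import cv2 as cv

lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))

feature_params = dict(maxCorners=500,
                      qualityLevel=0.3,
                      minDistance=7,
                      blockSize=7)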
Example No. 16
 def __init__(self, **kw):
     super(Cube, self).__init__(**kw)
     self.render = TestSceneRender(cv2.imread('../data/pca_test1.jpg'),
                                   deformation=True,
                                   speed=1)
Example No. 17
 def __init__(self, **kw):
     super(Cube, self).__init__(**kw)
     self.render = TestSceneRender(cv.imread(cv.samples.findFile('pca_test1.jpg')), deformation = True,  speed = 1)
Example No. 18
    def prepareRender(self):

        self.render = TestSceneRender(
            self.get_sample('samples/python2/data/pca_test1.jpg'),
            deformation=True)
Example No. 19
class camshift_test(NewOpenCVTests):

    framesNum = 300
    frame = None
    selection = None
    drag_start = None
    show_backproj = False
    track_window = None
    render = None
    errors = 0

    def prepareRender(self):

        self.render = TestSceneRender(
            self.get_sample('samples/python2/data/pca_test1.jpg'),
            deformation=True)

    def runTracker(self):

        framesCounter = 0
        self.selection = True

        xmin, ymin, xmax, ymax = self.render.getCurrentRect()

        self.track_window = (xmin, ymin, xmax - xmin, ymax - ymin)

        while True:
            framesCounter += 1
            self.frame = self.render.getNextFrame()
            hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv, np.array((0., 60., 32.)),
                               np.array((180., 255., 255.)))

            if self.selection:
                x0, y0, x1, y1 = self.render.getCurrentRect() + 50
                x0 -= 100
                y0 -= 100

                hsv_roi = hsv[y0:y1, x0:x1]
                mask_roi = mask[y0:y1, x0:x1]
                hist = cv2.calcHist([hsv_roi], [0], mask_roi, [16], [0, 180])
                cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
                self.hist = hist.reshape(-1)
                self.selection = False

            if (self.track_window and self.track_window[2] > 0
                    and self.track_window[3] > 0):
                self.selection = None
                prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
                prob &= mask
                term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                             10, 1)
                track_box, self.track_window = cv2.CamShift(
                    prob, self.track_window, term_crit)

            trackingRect = np.array(self.track_window)
            trackingRect[2] += trackingRect[0]
            trackingRect[3] += trackingRect[1]

            if intersectionRate(self.render.getCurrentRect(),
                                trackingRect) < 0.4:
                self.errors += 1

            if framesCounter > self.framesNum:
                break

        self.assertLess(float(self.errors) / self.framesNum, 0.4)

    def test_camshift(self):
        self.prepareRender()
        self.runTracker()
Example No. 20
 def __init__(self, **kw):
     super(Book, self).__init__(**kw)
     backGr = cv2.imread('../data/graf1.png')
     fgr = cv2.imread('../data/box.png')
     self.render = TestSceneRender(backGr, fgr, speed=1)
Example No. 21
 def __init__(self, **kw):
     super(Book, self).__init__(**kw)
     backGr = cv.imread('../data/graf1.png')
     fgr = cv.imread('../data/box.png')
     self.render = TestSceneRender(backGr, fgr, speed = 1)
Example No. 22
    def test_lk_track(self):

        self.render = TestSceneRender(
            self.get_sample('samples/data/graf1.png'),
            self.get_sample('samples/data/box.png'))
        self.runTracker()
Example No. 23
class lk_track_test(NewOpenCVTests):

    track_len = 10
    detect_interval = 5
    tracks = []
    frame_idx = 0
    render = None

    def test_lk_track(self):

        self.render = TestSceneRender(self.get_sample('samples/python2/data/graf1.png'), self.get_sample('samples/c/box.png'))
        self.runTracker()

    def runTracker(self):
        foregroundPointsNum = 0

        while True:
            frame = self.render.getNextFrame()
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            if len(self.tracks) > 0:
                img0, img1 = self.prev_gray, frame_gray
                p0 = np.float32([tr[-1][0] for tr in self.tracks]).reshape(-1, 1, 2)
                p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                d = abs(p0-p0r).reshape(-1, 2).max(-1)
                good = d < 1
                new_tracks = []
                for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                    if not good_flag:
                        continue
                    tr.append([(x, y), self.frame_idx])
                    if len(tr) > self.track_len:
                        del tr[0]
                    new_tracks.append(tr)
                self.tracks = new_tracks

            if self.frame_idx % self.detect_interval == 0:
                goodTracksCount = 0
                for tr in self.tracks:
                    oldRect = self.render.getRectInTime(self.render.timeStep * tr[0][1])
                    newRect = self.render.getRectInTime(self.render.timeStep * tr[-1][1])
                    if isPointInRect(tr[0][0], oldRect) and isPointInRect(tr[-1][0], newRect):
                        goodTracksCount += 1

                if self.frame_idx == self.detect_interval:
                    foregroundPointsNum = goodTracksCount

                fgIndex = float(foregroundPointsNum) / (foregroundPointsNum + 1)
                fgRate = float(goodTracksCount) / (len(self.tracks) + 1)

                if self.frame_idx > 0:
                    self.assertGreater(fgIndex, 0.9)
                    self.assertGreater(fgRate, 0.2)

                mask = np.zeros_like(frame_gray)
                mask[:] = 255
                for x, y in [np.int32(tr[-1][0]) for tr in self.tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)
                p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
                if p is not None:
                    for x, y in np.float32(p).reshape(-1, 2):
                        self.tracks.append([[(x, y), self.frame_idx]])

            self.frame_idx += 1
            self.prev_gray = frame_gray

            if self.frame_idx > 300:
                break