Example #1
0
 def __init__(self, video_src, paused=False):
     """Open the video source, show the first frame, and set up
     interactive rectangle selection for adding trackers.

     video_src -- source accepted by video.create_capture (index or path)
     paused    -- start in paused mode when True
     """
     self.cap = video.create_capture(video_src)
     # Grab one frame up front so the window has content to display.
     _, self.frame = self.cap.read()
     # The 'frame' window must exist before RectSelector attaches its
     # mouse callback to it.
     cv.imshow('frame', self.frame)
     self.rect_sel = RectSelector('frame', self.onrect)
     self.trackers = []     # active trackers; populated via self.onrect
     self.paused = paused
Example #2
0
    def __init__(self, src):
        """Open the capture with the 'book' preset, create the display
        window, and attach an interactive rectangle selector used to
        define tracking targets.
        """
        self.cap = video.create_capture(src, presets['book'])
        self.frame = None          # latest frame; filled in by the run loop
        self.paused = False
        self.tracker = PlaneTracker()

        # The 'plane' window must exist before the selector installs its
        # mouse callback on it.
        cv.namedWindow('plane')
        self.rect_sel = common.RectSelector('plane', self.on_rect)
Example #3
0
    def __init__(self, video_src):
        """Open the capture, create the display window, and install the
        mouse callback used to select the region to track.
        """
        self.cam = video.create_capture(video_src, presets['cube'])
        # Read an initial frame so self.frame is valid immediately.
        _ret, self.frame = self.cam.read()
        # Window must exist before the mouse callback can be attached.
        cv.namedWindow('camshift')
        cv.setMouseCallback('camshift', self.onmouse)

        self.selection = None       # selected rectangle, set by onmouse
        self.drag_start = None      # mouse-down position while dragging
        self.show_backproj = False  # toggle: display the back-projection
        self.track_window = None    # current CamShift search window
Example #4
0
    # Parse optional cascade paths from the command line; the positional
    # argument (if any) selects the video source.
    args, video_src = getopt.getopt(sys.argv[1:], '',
                                    ['cascade=', 'nested-cascade='])
    try:
        video_src = video_src[0]
    except IndexError:
        # No source given on the command line -- default to camera 0.
        # (Was a bare 'except:', which would also hide real errors.)
        video_src = 0
    args = dict(args)
    cascade_fn = args.get(
        '--cascade', "../../data/haarcascades/haarcascade_frontalface_alt.xml")
    nested_fn = args.get('--nested-cascade',
                         "../../data/haarcascades/haarcascade_eye.xml")

    # Face detector plus a nested (eye) detector run inside face regions.
    cascade = cv.CascadeClassifier(cascade_fn)
    nested = cv.CascadeClassifier(nested_fn)

    # Fall back to a synthetic test image when the capture cannot be opened.
    cam = create_capture(video_src,
                         fallback='synth:bg=../data/lena.jpg:noise=0.05')

    # Main loop: grab a frame, detect faces, then look for eyes inside each
    # detected face rectangle.  (The loop body continues beyond this view.)
    while True:
        ret, img = cam.read()
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        # Histogram equalization improves cascade detection under uneven lighting.
        gray = cv.equalizeHist(gray)

        t = clock()  # start timing the detection pass
        rects = detect(gray, cascade)
        vis = img.copy()
        draw_rects(vis, rects, (0, 255, 0))
        if not nested.empty():
            for x1, y1, x2, y2 in rects:
                # Restrict the nested (eye) search to the face ROI.
                roi = gray[y1:y2, x1:x2]
                vis_roi = vis[y1:y2, x1:x2]
                subrects = detect(roi.copy(), nested)
    # Convert the precomputed HSV palette image to BGR for display.
    hsv_map = cv.cvtColor(hsv_map, cv.COLOR_HSV2BGR)
    cv.imshow('hsv_map', hsv_map)

    cv.namedWindow('hist', 0)
    hist_scale = 10  # brightness multiplier for the histogram visualization

    def set_scale(val):
        # Trackbar callback: update the shared scale factor.
        # NOTE(review): 'global' assumes this code runs at module level --
        # confirm against the full file (here it appears indented).
        global hist_scale
        hist_scale = val
    cv.createTrackbar('scale', 'hist', hist_scale, 32, set_scale)

    # Video source: first CLI argument, else camera 0; fall back to a
    # synthetic chessboard scene when the capture cannot be opened.
    try:
        fn = sys.argv[1]
    except IndexError:
        # Was a bare 'except:', which would also hide real errors.
        fn = 0
    cam = video.create_capture(fn, fallback='synth:bg=../data/baboon.jpg:class=chess:noise=0.05')

    # Display loop: show the camera frame and a 2D hue/saturation histogram
    # tinted by the HSV palette.  (Loop continues beyond this view.)
    while True:
        flag, frame = cam.read()
        cv.imshow('camera', frame)

        # Downsample before computing the histogram to cut per-frame cost.
        small = cv.pyrDown(frame)

        hsv = cv.cvtColor(small, cv.COLOR_BGR2HSV)
        # Zero out very dark pixels; their hue is unreliable noise.
        dark = hsv[...,2] < 32
        hsv[dark] = 0
        # 2D histogram over hue (180 bins) and saturation (256 bins).
        h = cv.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])

        # Scale counts into [0, 1], then tint by the palette for display.
        h = np.clip(h*0.005*hist_scale, 0, 1)
        vis = hsv_map*h[:,:,np.newaxis] / 255.0
        cv.imshow('hist', vis)
Example #6
0
 def __init__(self, video_src):
     """Open the video source and reset feature-tracking state.

     (Fixed: the original had a redundant 'self.cam = self.cam = ...'
     double assignment.)
     """
     self.cam = video.create_capture(video_src, presets['book'])
     self.p0 = None          # tracked feature points; None until (re)detected
     self.use_ransac = True  # robust estimation toggle
Example #7
0
    # Collapse a Laplacian pyramid: start from the coarsest level and
    # repeatedly upsample, adding back each finer level's detail.
    # (The enclosing def line lies before this view.)
    img = levels[-1]
    for lev_img in levels[-2::-1]:
        img = cv.pyrUp(img, dstsize=getsize(lev_img))
        img += lev_img
    # Clamp to the valid 8-bit range before converting for display.
    return np.uint8(np.clip(img, 0, 255))


if __name__ == '__main__':
    import sys
    print(__doc__)

    # Video source: first CLI argument, else camera 0.
    try:
        fn = sys.argv[1]
    except IndexError:
        # Was a bare 'except:', which would also hide real errors.
        fn = 0
    cap = video.create_capture(fn)

    leveln = 6
    cv.namedWindow('level control')
    # One trackbar per pyramid level to control its gain.
    # (Was Python-2-only 'xrange'; 'range' works on both 2 and 3.)
    for i in range(leveln):
        cv.createTrackbar('%d' % i, 'level control', 5, 50, nothing)

    # Per-frame loop: rebuild the Laplacian pyramid, apply per-level gains
    # from the trackbars, then merge.  (Loop continues beyond this view.)
    while True:
        ret, frame = cap.read()

        pyr = build_lappyr(frame, leveln)
        # NOTE(review): 'xrange' is Python 2 only; this raises NameError
        # under Python 3 -- should be 'range'.
        for i in xrange(leveln):
            # Integer gain derived from the trackbar (steps of 5).
            v = int(cv.getTrackbarPos('%d' % i, 'level control') / 5)
            pyr[i] *= v
        res = merge_lappyr(pyr)
Example #8
0
-----
    ESC   - exit

'''

import cv2 as cv
from face import video
import sys

if __name__ == '__main__':
    # Video source: first CLI argument, else camera 0.
    try:
        video_src = sys.argv[1]
    except IndexError:
        # Was a bare 'except:', which would also hide real errors.
        video_src = 0

    cam = video.create_capture(video_src)
    # MSER detector: finds maximally stable extremal regions.
    mser = cv.MSER_create()

    while True:
        ret, img = cam.read()
        if ret == 0:
            # Capture failed / stream ended.
            break
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        vis = img.copy()

        # Detect regions on grayscale and outline each region's convex hull.
        regions, _ = mser.detectRegions(gray)
        hulls = [cv.convexHull(p.reshape(-1, 1, 2)) for p in regions]
        cv.polylines(vis, hulls, 1, (0, 255, 0))

        cv.imshow('img', vis)
        # ESC exits.  (The if-body lies beyond this view.)
        if cv.waitKey(5) == 27:
def main():
    """Recognize handwritten digits in a live video stream.

    Loads the SVM classifier trained by digits.py, finds digit-sized
    blobs in each frame, normalizes them to the SZ x SZ training size,
    and overlays the predicted digit.  Runs until ESC is pressed.
    """
    # Video source: first CLI argument, else camera 0.
    try:
        src = sys.argv[1]
    except IndexError:
        src = 0
    cap = video.create_capture(src)

    classifier_fn = 'digits_svm.dat'
    if not os.path.exists(classifier_fn):
        print('"%s" not found, run digits.py first' % classifier_fn)
        return

    # SVM_load is used instead of SVM_create()+load_ because load_ is
    # broken: https://github.com/opencv/opencv/issues/4969
    # (Removed the dead 'if True:/else:' branch that guarded this.)
    model = cv.ml.SVM_load(classifier_fn)

    while True:
        _ret, frame = cap.read()
        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

        # Adaptive threshold makes dark strokes white; median blur removes
        # speckle.  ('bin_img' instead of 'bin' to avoid shadowing the builtin.)
        bin_img = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_MEAN_C,
                                       cv.THRESH_BINARY_INV, 31, 10)
        bin_img = cv.medianBlur(bin_img, 3)
        _, contours, heirs = cv.findContours(bin_img.copy(), cv.RETR_CCOMP,
                                             cv.CHAIN_APPROX_SIMPLE)
        try:
            heirs = heirs[0]
        except (IndexError, TypeError):
            # No contours: hierarchy is empty, or None (TypeError on [0]).
            heirs = []

        for cnt, heir in zip(contours, heirs):
            _, _, _, outer_i = heir
            if outer_i >= 0:
                # Skip holes; keep only outer contours.
                continue
            x, y, w, h = cv.boundingRect(cnt)
            # Keep blobs of plausible digit size and aspect ratio.
            if not (16 <= h <= 64 and w <= 1.2 * h):
                continue
            # Pad narrow blobs out to roughly square, centred on the stroke.
            pad = max(h - w, 0)
            x, w = x - (pad // 2), w + pad
            cv.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0))

            bin_roi = bin_img[y:, x:][:h, :w]

            # Reject ROIs whose ink coverage is implausible for a digit.
            m = bin_roi != 0
            if not 0.1 < m.mean() < 0.4:
                continue

            # Map the ROI into the SZ x SZ frame used during training:
            # scale about the blob's centre of mass, then deskew.
            s = 1.5 * float(h) / SZ
            m = cv.moments(bin_roi)
            c1 = np.float32([m['m10'], m['m01']]) / m['m00']
            c0 = np.float32([SZ / 2, SZ / 2])
            t = c1 - s * c0
            A = np.zeros((2, 3), np.float32)
            A[:, :2] = np.eye(2) * s
            A[:, 2] = t
            bin_norm = cv.warpAffine(bin_roi,
                                     A, (SZ, SZ),
                                     flags=cv.WARP_INVERSE_MAP
                                     | cv.INTER_LINEAR)
            bin_norm = deskew(bin_norm)
            # Show the normalized patch beside the detection when it fits.
            if x + w + SZ < frame.shape[1] and y + SZ < frame.shape[0]:
                frame[y:, x + w:][:SZ, :SZ] = bin_norm[..., np.newaxis]

            # Classify and overlay the predicted digit.
            sample = preprocess_hog([bin_norm])
            digit = model.predict(sample)[0]
            cv.putText(frame,
                       '%d' % digit, (x, y),
                       cv.FONT_HERSHEY_PLAIN,
                       1.0, (200, 0, 0),
                       thickness=1)

        cv.imshow('frame', frame)
        cv.imshow('bin', bin_img)
        ch = cv.waitKey(1)
        if ch == 27:
            break
Example #10
0
 def __init__(self, video_src):
     """Open the capture and reset all optical-flow tracking state."""
     # Open the capture first; the rest are plain counters/containers.
     self.cam = video.create_capture(video_src)
     self.tracks = []          # each entry: recent positions of one feature
     self.track_len = 10       # max history length kept per track
     self.detect_interval = 5  # re-detect features every N frames
     self.frame_idx = 0        # frames processed so far