Example #1
def GUI():
    global frame, goon, win
    for key, frame in autoStream():
        cv.imshow('cam', frame)
        if win is not None:
            cv.imshow('inception', win)
            win = None
    goon = False
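
# The excerpt shows only the display side. Presumably the heavy processing
# runs in a separate worker thread while GUI() runs in the main thread, the
# two communicating through the shared globals `frame`, `win` and `goon`.
# A minimal sketch of that wiring (process() is a hypothetical placeholder;
# the cv/autoStream imports are assumed from the part of the file not shown):

import threading

def process(img):
    # hypothetical placeholder for the real (slow) computation
    return img

def work():
    global win
    while goon:
        if frame is not None:
            win = process(frame)

frame, win, goon = None, None, True
threading.Thread(target=work, daemon=True).start()
GUI()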
Example #2
        z1 = np.zeros([s, w2])
        z2 = np.zeros([s, s - w - w2])
        y = np.hstack([z1, x, z2])
    # pad vertically with zeros to complete a square s x s image
    if h2 > 0:
        z1 = np.zeros([h2, s])
        z2 = np.zeros([s - h - h2, s])
        y = np.vstack([z1, x, z2])
    # resize to 20x20, scale to [0, 1] and move the center of mass to the
    # middle of a 28x28 canvas (MNIST-style preprocessing)
    y = cv.resize(y, (20, 20)) / 255
    mx, my = center(y)
    H = np.array([[1., 0, 4 - (mx - 9.5)], [0, 1, 4 - (my - 9.5)]])
    return cv.warpAffine(y, H, (28, 28))


black = True

for key, frame in autoStream():

    # detect candidate blobs in the frame
    cosas = extractThings(frame)

    # normalize each blob to the 28x28 MNIST format
    nor = [adaptsize(c) for (x, y), c in cosas]

    # keep the image position of each blob to draw its label later
    loc = [c[0] for c in cosas]

    t0 = time.time()
    if nor:
        clas, prob = classify(nor)
    else:
        clas, prob = [], []
    t1 = time.time()

    for (x, y), label, pr in zip(loc, clas, prob):
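        # The excerpt is cut off at this point. A hypothetical continuation
        # (not from the source) would annotate each detection with its
        # predicted class and probability, and show the result:
        cv.putText(frame, f'{label} {pr:.2f}', (int(x), int(y)),
                   cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
    cv.imshow('digits', frame)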
Example #3
import cv2 as cv
import numpy as np
from umucv.stream import autoStream
from umucv.util import putText
import time

corners_params = dict(maxCorners=500,
                      qualityLevel=0.1,
                      minDistance=10,
                      blockSize=7)

lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10,
                           0.03))

for n, (key, frame) in enumerate(autoStream()):
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

    # reset the starting points of the tracker
    if n == 0 or key == ord('c'):
        corners = cv.goodFeaturesToTrack(gray, **corners_params).reshape(-1, 2)
        nextPts = corners
        prevgray = gray

    t0 = time.time()

    # find the new position of each point from the previous one
    nextPts, status, err = cv.calcOpticalFlowPyrLK(prevgray, gray, nextPts,
                                                   None, **lk_params)
    prevgray = gray
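    # The excerpt is cut off at this point. A hypothetical continuation
    # (not from the source): keep only the points tracked successfully,
    # draw them, and use them as the starting points for the next frame.
    good = nextPts.reshape(-1, 2)[status.flatten() == 1]
    for x, y in good:
        cv.circle(frame, (int(x), int(y)), 3, (0, 255, 0), -1)
    nextPts = good
    putText(frame, f'{len(good)} points, {1000*(time.time()-t0):.1f} ms')
    cv.imshow('LK', frame)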
Example #4

def rots(c):
    return [np.roll(c, k, 0) for k in range(len(c))]


def bestPose(K, view, model):
    poses = [Pose(K, v.astype(float), model) for v in rots(view)]
    sp = sorted(poses, key=lambda p: p.rms)
    return sp[0]


marker = np.array([[0, 0, 0], [0, 1, 0], [0.5, 1, 0], [0.5, 0.5, 0],
                   [1, 0.5, 0], [1, 0, 0]])

stream = autoStream()

HEIGHT, WIDTH = next(stream)[1].shape[:2]
size = WIDTH, HEIGHT
print(size)

f = 1.7
K = kgen(size, f)  # approx. 60 degree FOV
print(K)

## Create a GL View widget to display data
app = QtGui.QApplication([])
win = gl.GLViewWidget()
win.show()
win.setWindowTitle('pose')
win.setCameraPosition(distance=20)
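
# The excerpt stops before the main loop. Presumably each candidate 6-vertex
# polygon detected in the frame is matched against `marker`: bestPose tries
# every cyclic rotation of the detected vertices and keeps the pose with the
# smallest reprojection error. A hypothetical sketch (the polygon detector
# and the drawing code are not part of this excerpt):
#
#   for key, frame in autoStream():
#       for view in detect_hexagons(frame):   # hypothetical detector
#           pose = bestPose(K, view, marker)
#           if pose.rms < 2:                  # arbitrary quality threshold
#               pass                          # project and draw with the pose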
Example #5
#!/usr/bin/env python

import cv2 as cv
import time
from umucv.util import putText
from umucv.stream import autoStream

hog = cv.HOGDescriptor()
hog.setSVMDetector(cv.HOGDescriptor_getDefaultPeopleDetector())

MULTISCALE = True

for key, image in autoStream():

    # toggle between multi-scale and single-scale detection
    if key == ord('m'):
        MULTISCALE = not MULTISCALE

    t0 = time.time()
    if MULTISCALE:
        (rects, weights) = hog.detectMultiScale(image,
                                                winStride=(4, 4),
                                                padding=(8, 8),
                                                scale=1.1)
    else:
        (rects, weights) = hog.detect(image, winStride=(4, 4), padding=(8, 8))
    t1 = time.time()

    if len(rects) > 0:
        for rect, p in zip(rects, weights.flatten()):
            if MULTISCALE:
                x, y, w, h = rect
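                # The excerpt is cut off here; a hypothetical continuation
                # (not from the source) follows.
            else:
                # hog.detect() returns only window origins; the default HOG
                # people detector uses a fixed 64x128 window
                x, y = rect
                w, h = 64, 128
            cv.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
            putText(image, f'{p:.2f}', orig=(int(x), int(y)))

    putText(image, f'{1000*(t1-t0):.0f} ms')
    cv.imshow('people', image)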
Example #6
# invert the image to avoid picking up the white outer rectangle
mod = extractContours(255 - rgb2gray(readrgb('shapes/trebol.png')))[0].reshape(
    -1, 2)
imod = invar(mod)

model = np.zeros((200, 200), np.uint8)
cv.drawContours(model, [mod], -1, 255, 1)
cv.imshow('model', model)


def razonable(c):
    # discard tiny contours: keep only those with a reasonable perimeter
    return cv.arcLength(c, closed=True) >= 50


for (key, frame) in autoStream():

    g = rgb2gray(frame)
    # it would be good to discard contours that touch the image border, etc.
    contours = extractContours(g)

    ok = [c for c in contours if razonable(c)]

    # keep the contours whose invariant descriptor is close to the model's
    found = [c for c in ok if norm(invar(c) - imod) < 0.15]
    #print(len(contours), len(ok), len(found))

    #cv.drawContours(frame, contours, -1, (255,128,128), 1)
    #cv.drawContours(frame, ok, -1, (0,0,255), 1)
    cv.drawContours(frame, found, -1, (0, 255, 0), cv.FILLED)

    cv.imshow('shapes', frame)
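
# `invar` is defined elsewhere in the course code; conceptually it is a
# contour descriptor that is invariant to translation, rotation, scale and
# starting point. A rough sketch of the idea (not the actual implementation),
# based on the magnitudes of the low-frequency Fourier components of the
# contour; assumes numpy is imported as np and the contour has been
# resampled to a fixed number of points:

def invar_sketch(c, w=10):
    z = c[:, 0] + 1j * c[:, 1]    # contour as a complex signal
    f = np.abs(np.fft.fft(z))     # magnitudes: rotation / start point invariant
    f[0] = 0                      # drop the DC term: translation invariant
    return f[1:w + 1] / f[1]      # divide by the 1st harmonic: scale invariant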
Example #7
#!/usr/bin/env python

# adapted from
# http://dlib.net/train_object_detector.py.html

import sys
import dlib
import cv2 as cv

from umucv.stream import autoStream

detector = dlib.simple_object_detector(sys.argv[1])
# We can look at the HOG filter we learned.
win_det = dlib.image_window()
win_det.set_image(detector)

for key, img in autoStream():
    dets = detector(img)
    for k, d in enumerate(dets):
        cv.rectangle(img, (d.left(), d.top()), (d.right(), d.bottom()),
                     (128, 128, 255), 3)
    cv.imshow("object detection", img)
Example #8
    def __init__(self, xs, levels=16):
        # smoothed 3D color histogram with `levels` bins per channel
        self.redu = 256 // levels
        self.H = fil.gaussian_filter(hist(xs, self.redu), 1)
        print(self.H.shape)

    def __call__(self, img):
        # look up the likelihood of each pixel's quantized color
        r, g, b = np.floor_divide(img, self.redu).transpose(2, 0, 1)
        return self.H[r, g, b]


cv.namedWindow("input")
roi = ROI('input')

model = None

for key, x in autoStream():

    if model is not None:
        l = model(x)

        maxl = l.max()
        cv.imshow("likelihood",
                  np.clip((1 * l / maxl * 255), 0, 255).astype(np.uint8))

        term_crit = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1)
        ret, track_window = cv.CamShift(l, track_window, term_crit)

        cv.ellipse(x, ret, (0, 128, 255), 2)
        #pts = cv.boxPoints(ret).astype(int)
        #cv.polylines(x,[pts],True, (0,128,255),2)
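    # The excerpt omits both the `hist` helper used in __init__ and the code
    # that creates `model` and `track_window` from a selected region. A
    # hypothetical sketch of both (umucv's ROI widget is assumed to expose
    # the selected rectangle as roi.roi):
    #
    #   def hist(xs, redu):
    #       # 3D color histogram with 256 // redu levels per channel
    #       levels = 256 // redu
    #       data = np.vstack([im.reshape(-1, 3) // redu for im in xs])
    #       H, _ = np.histogramdd(data, bins=levels, range=[(0, levels)] * 3)
    #       return H
    #
    #   if key == ord('c') and roi.roi:
    #       x1, y1, x2, y2 = roi.roi
    #       # build an instance of the class above (its name is cut off in
    #       # this excerpt) from the selected patch
    #       model = ...
    #       track_window = (x1, y1, x2 - x1, y2 - y1)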
Example #9
def fun():
    global frame, goon, key
    for key, frame in autoStream():
        cv.imshow('cam', frame)
    goon = False
Example #10
## Create a GL View widget to display data
app = QtGui.QApplication([])

#w = gl.GLViewWidget()
w = KeyPressWindow()
w.sigKeyPress.connect(keyPressed)

w.show()
w.setWindowTitle('gray level surface')
w.setCameraPosition(distance=50)

## Add a grid to the view
g = gl.GLGridItem()
w.addItem(g)

source = autoStream()


def getframe():
    key, frame = next(source)
    return cv.cvtColor(frame, cv.COLOR_BGR2GRAY).astype(float) / 255


p = gl.GLSurfacePlotItem(z=getframe(),
                         shader='heightColor',
                         computeNormals=False,
                         smooth=False)
p.scale(20 / 640, 20 / 640, 10)
p.translate(-8, -10, 0)
w.addItem(p)
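
# The excerpt ends before the refresh loop. With pyqtgraph this is usually
# done with a QTimer that replaces the surface data with every new frame.
# A hedged sketch, assuming QtCore is imported from pyqtgraph.Qt alongside
# QtGui:

def update():
    p.setData(z=getframe())

timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(30)            # refresh roughly every 30 ms

app.exec_()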