Code Example #1
File: meanshift_track.py  Project: baojiwei/gestures
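    # __init__ of the tracking application class (a VideoApp subclass); module-level
    # imports such as matplotlib (plt / mpl) are not shown in this excerpt.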
    def __init__(self, callback, **kwargs):
        # Two panels: the raw camera frame and the histogram back-projection.
        fig, axes = plt.subplots(1, 2)
        self.axes = dict(zip(['raw', 'backprojection'], axes.ravel()))
        for k, ax in self.axes.items():
            ax.set_title(k)

        VideoApp.__init__(self, fig, **kwargs)

        # Seed both panels with the first captured frame.
        img = self._cap.read()
        self.axes['raw'].imshow(img)
        self.axes['backprojection'].imshow(img[..., 0],
                                           cmap=mpl.cm.get_cmap('gray'))
        fig.tight_layout()

        # Mouse handlers used to drag out the region to track.
        fig.canvas.mpl_connect('button_press_event', self.on_press)
        fig.canvas.mpl_connect('button_release_event', self.on_release)

        self.SELECTING = False
        self.bbox = None
        self._bbox = None
        self.callback = callback
        self.show = self._fig.show
Code Example #2
File: skin_segment.py  Project: baojiwei/gestures
import cv2
import matplotlib.pyplot as plt

from gestures.utils.gui import VideoApp
# FrameBuffer and GaussianSkinSegmenter, used below, come from the gestures
# package as well; their import lines are not shown in this excerpt.

# Grab an axes' image artist, and a 9x9 box blur used to denoise each frame.
get_imdisp = lambda ax: ax.get_images()[0]
blur = lambda x: cv2.blur(x, (9, 9), borderType=cv2.BORDER_REFLECT)


fig, axes = plt.subplots(1, 2)
axes = dict(zip(['raw', 'skin'], axes.ravel()))
for k, ax in axes.items():
    ax.set_title(k)

cap = FrameBuffer.from_argv()
curr = cap.read()
axes['raw'].imshow(curr)
axes['skin'].imshow(curr)
fig.tight_layout()

app = VideoApp(fig, cap)
coseg = GaussianSkinSegmenter()

fig.show()
while app:
    curr = blur(cap.read())
    skin = coseg(curr)

    # Zero out everything the segmenter did not classify as skin.
    dispimg = curr.copy()
    dispimg *= skin[..., None]

    # Frames are BGR; reverse the channel axis for matplotlib's RGB display.
    app.update_artists((get_imdisp(axes['raw']), curr[:, :, ::-1]),
                       (get_imdisp(axes['skin']), dispimg[:, :, ::-1]))
Code Example #3
File: fused_segment.py  Project: baojiwei/gestures
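# Imports, the blur/get_imdisp helpers, and the four-panel figure setup
# ('raw', 'moving', 'skin', 'fused') are omitted from this excerpt.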
cap = FrameBuffer.from_argv()
curr = cap.read()
axes['raw'].imshow(curr)
axes['moving'].imshow(curr, cmap=mpl.cm.get_cmap('gray'))
axes['skin'].imshow(curr, cmap=mpl.cm.get_cmap('gray'))
axes['fused'].imshow(curr, cmap=mpl.cm.get_cmap('gray'))
fig.tight_layout()

# Seed the segmenter with two consecutive blurred grayscale frames.
prev = blur(curr)
curr = blur(cap.read())
prevg = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
currg = cv2.cvtColor(curr, cv2.COLOR_BGR2GRAY)
smseg = SkinMotionSegmenter(prevg, currg)

app = VideoApp(fig, cap)
fig.show()
while app:
    curr = blur(cap.read())
    mask = smseg(curr)
    dispimg = curr.copy()

    # Draw the centroid and bounding box of the fused skin/motion region.
    if smseg.bbox is not None:
        x, y, w, h = smseg.bbox
        cv2.circle(dispimg, tuple(smseg.com), 5, color=(0, 255, 0), thickness=-1)
        cv2.rectangle(dispimg, (x, y), (x + w, y + h), color=(0, 204, 255), thickness=2)

    # BGR -> RGB for matplotlib; the mask images are scaled up for display.
    app.update_artists((get_imdisp(axes['raw']), dispimg[:, :, ::-1]),
                       (get_imdisp(axes['moving']), smseg.motion * 255),
                       (get_imdisp(axes['skin']), smseg.skin * 255),
                       (get_imdisp(axes['fused']), smseg.backprojection * 255))
Code Example #4
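# Imports and the blur helper are omitted from this excerpt; the snippet uses
# numpy (np), cv2, matplotlib (plt/mpl), FrameBuffer, VideoApp,
# ConvexityHandDetector and GaussianSkinSegmenter.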
get_imdisp = lambda ax: ax.get_images()[0]


fig, axes = plt.subplots(1, 2)
axes = dict(zip(['raw', 'skin'], axes.ravel()))
for k, ax in axes.items():
    ax.set_title(k)

cap = FrameBuffer.from_argv()
curr = cap.read()
axes['raw'].imshow(curr)
axes['skin'].imshow(curr, cmap=mpl.cm.get_cmap('gray'))
fig.tight_layout()

hdetect = ConvexityHandDetector()
smseg = GaussianSkinSegmenter()
app = VideoApp(fig, cap)

fig.show()
while app:
    curr = blur(cap.read())
    dispimg = curr.copy()
    mask = smseg(curr)

    detected = hdetect(mask)
    if detected:
        # Mark the centroid of the detected hand's convex hull.
        hull_pts = hdetect.hull
        com = (np.sum(hull_pts[:, 0, :], axis=0, dtype=float)
               / len(hull_pts)).astype(int)
        cv2.circle(dispimg, tuple(com), 5, color=(0, 255, 0), thickness=-1)

    # Outline the hull; the colour depends on whether a hand was detected.
    color = (255, 0, 0) if detected else (0, 0, 255)
    cv2.drawContours(dispimg, [hdetect.hull], 0, color, 3)
Code Example #5
File: motion_segment.py  Project: baojiwei/gestures
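# Imports and the blur/get_imdisp helpers are omitted from this excerpt; the
# snippet uses cv2, matplotlib (plt/mpl), FrameBuffer, VideoApp and MotionSegmenter.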
cap = FrameBuffer.from_argv()
prev = blur(cap.read())
prevg = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
currg = cv2.cvtColor(blur(cap.read()), cv2.COLOR_BGR2GRAY)

fig, axes = plt.subplots(2, 2)
axes = dict(zip(["raw", "bkgnd", "thresh", "moving"], axes.ravel()))
for k, ax in axes.items():
    ax.set_title(k)
axes["raw"].imshow(prev)
axes["bkgnd"].imshow(prevg, cmap=mpl.cm.get_cmap("gray"))
axes["moving"].imshow(prevg, cmap=mpl.cm.get_cmap("gray"))
axes["thresh"].imshow(prevg, cmap=mpl.cm.get_cmap("gray"))
fig.tight_layout()

app = VideoApp(fig, cap=cap)
moseg = MotionSegmenter(prevg, currg, dict(alpha=0.1))

fig.show()
while app:
    curr = blur(cap.read())
    moving = moseg(cv2.cvtColor(curr, cv2.COLOR_BGR2GRAY))
    dispimg = curr

    # estimate centroid and bounding box
    if moseg.bbox is not None:
        x, y, w, h = moseg.bbox
        cv2.circle(dispimg, (x, y), 5, color=(0, 255, 0), thickness=-1)
        cv2.rectangle(dispimg, (x, y), (x + w, y + h), color=(0, 204, 255), thickness=2)

    # The remaining panel updates ("bkgnd", "thresh", "moving") are truncated in
    # the original excerpt; only the "raw" panel update is shown.
    app.update_artists((get_imdisp(axes["raw"]), dispimg[..., ::-1].copy()))