Example #1
import matplotlib.pyplot as plt
import matplotlib as mpl
from gestures.utils.framebuffer import FrameBuffer,ImageBuffer
from gestures.segmentation import GaussianSkinSegmenter
from gestures.utils.gui import VideoApp
import cv2

get_imdisp = lambda ax: ax.get_images()[0]
blur = lambda x: cv2.blur(x,(9,9),borderType=cv2.BORDER_REFLECT)


fig, axes = plt.subplots(1,2)
axes = dict(zip(['raw','skin'],axes.ravel()))
for k,ax in axes.items(): ax.set_title(k)

cap = FrameBuffer.from_argv()
curr = cap.read()
axes['raw'].imshow(curr)
axes['skin'].imshow(curr)
fig.tight_layout()

app = VideoApp(fig,cap)
coseg = GaussianSkinSegmenter()

fig.show()
while app:
    curr = blur(cap.read())
    skin = coseg(curr)

    dispimg = curr.copy()
    dispimg *= skin[...,None]  # zero out non-skin pixels

    # assumed completion (the snippet is truncated here): push the new frames
    # to the existing image artists and redraw
    get_imdisp(axes['raw']).set_data(curr)
    get_imdisp(axes['skin']).set_data(dispimg)
    fig.canvas.draw_idle()
    fig.canvas.flush_events()
Example #2
import cv2
import matplotlib.pyplot as plt
import matplotlib as mpl
from gestures.utils.framebuffer import FrameBuffer
from gestures.utils.gui import VideoApp
from gestures.hand_detection import ConvexityHandDetector
from gestures.segmentation import GaussianSkinSegmenter


blur = lambda x: cv2.blur(x,(9,9),borderType=cv2.BORDER_REFLECT)
get_imdisp = lambda ax: ax.get_images()[0]


fig, axes = plt.subplots(1,2)
axes = dict(zip(['raw','skin'],axes.ravel()))
for k,ax in axes.items(): ax.set_title(k)

cap = FrameBuffer.from_argv()
curr = cap.read()
axes['raw'].imshow(curr)
axes['skin'].imshow(curr,cmap=mpl.cm.get_cmap('gray'))
fig.tight_layout()

hdetect = ConvexityHandDetector()
smseg = GaussianSkinSegmenter()
app = VideoApp(fig,cap)

fig.show()
while app:
    curr = blur(cap.read())
    dispimg = curr.copy()
    mask = smseg(curr)

    # assumed completion (the snippet is truncated here and the call into
    # hdetect is not shown): display the blurred frame and the skin mask
    get_imdisp(axes['raw']).set_data(dispimg)
    get_imdisp(axes['skin']).set_data(mask)
    fig.canvas.draw_idle()
    fig.canvas.flush_events()
Example #3
def __init__(self, fig, cap=None, **kwargs):
    # Use the given capture source, or fall back to one built from the
    # command-line arguments when no FrameBuffer is supplied.
    self._cap = cap
    if cap is None:
        self._cap = FrameBuffer.from_argv()
    App.__init__(self, fig, **kwargs)
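A minimal usage sketch for the constructor above (assumed behaviour, inferred only from the snippets on this page):

import matplotlib.pyplot as plt
from gestures.utils.framebuffer import FrameBuffer
from gestures.utils.gui import VideoApp

fig, ax = plt.subplots()

# Pass an explicit capture source ...
app = VideoApp(fig, cap=FrameBuffer.from_argv())

# ... or omit it and let __init__ fall back to FrameBuffer.from_argv() itself.
app = VideoApp(fig)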