Beispiel #1
0
class TestHand():
    """Interactive smoke test: segments and displays the hand in Kinect frames.

    Pipeline: Kinect -> HandDetector(HandOtsu) -> HandContourDetector ->
    PalmDetector, showing each intermediate image in an OpenCV window.
    """

    def __init__(self):
        self._kinect = Kinect()
        self._body = BodyDetector()
        self._hand = HandDetector(HandOtsu())
        self._contour = HandContourDetector()
        self._palm = PalmDetector()

    def run(self):
        """Loop over live frames and display the segmented hand stages."""
        for (depth, depth8, rgb) in self._kinect.get_data():

            cv2.imshow('rgb', cv2.cvtColor(rgb, cv2.COLOR_BGR2RGB))
            cv2.imshow('depth', depth8)

            hand, mask = self._hand.run(depth, depth8)
            (_, _, crop) = self._contour.run(mask)

            # Identity check: `== None` on a numpy array broadcasts
            # element-wise instead of yielding a single bool.
            if crop is None:
                continue

            cv2.imshow('hand', crop)

            hand = self._palm.run(hand, crop)
            if hand is None:
                continue

            cv2.imshow('hand final', hand)

            cv2.waitKey(10)

    def _crop(self, img, box):
        """Return the sub-image of `img` for `box` = (x, y, w, h)."""
        return img[box[1]:box[1] + box[3], box[0]:box[0] + box[2]]
Beispiel #2
0
class TestPose():
    """Interactive smoke test: classifies the hand pose on live Kinect frames.

    `src` is forwarded to MultiPoseClassifier (presumably the trained-model
    source — TODO confirm against MultiPoseClassifier).
    """

    def __init__(self, src):
        self._src     = src
        self._kinect  = Kinect()
        self._body    = BodyDetector()
        self._hand    = HandDetector()
        self._contour = HandContourDetector()
        self._pose    = PoseClassifier(MultiPoseClassifier(src))

    def run(self):
        """Loop over live frames, printing the classified pose for each."""
        for (depth, depth8, rgb) in self._kinect.get_data():
            contour = self._get_hand_contour(depth8, depth, rgb)

            if contour.any():
                self._contour.draw()
                # Parenthesized print: identical output, also valid Python 3.
                print(self._pose.run(contour))

            cv2.waitKey(5)

    def _get_hand_contour(self, depth8, depth, rgb):
        """Return the refined hand contour, or an empty array when invalid."""
        body            = self._body.run(depth8)
        (hand, _)       = self._hand.run(body)
        (cont, box, hc) = self._contour.run(hand)

        if self._contour.not_valid():
            return np.array([])

        # Second pass refines the contour using the RGB image and depth data.
        (cont, _, _) = self._contour.run(rgb, True, box, hc, depth)

        return cont
Beispiel #3
0
class TestHand():
    """Visual test of hand segmentation on live Kinect data.

    Chains Kinect -> HandDetector(HandOtsu) -> HandContourDetector ->
    PalmDetector and shows every intermediate result via cv2.imshow.
    """

    def __init__(self):
        self._kinect  = Kinect()
        self._body    = BodyDetector()
        self._hand    = HandDetector(HandOtsu())
        self._contour = HandContourDetector()
        self._palm    = PalmDetector()

    def run(self):
        """Display rgb/depth input and the segmented hand for each frame."""
        for (depth, depth8, rgb) in self._kinect.get_data():

            cv2.imshow('rgb', cv2.cvtColor(rgb, cv2.COLOR_BGR2RGB))
            cv2.imshow('depth', depth8)

            hand, mask = self._hand.run(depth, depth8)
            (_, _, crop) = self._contour.run(mask)

            # Use `is None`: `== None` broadcasts element-wise if `crop`
            # is a numpy array, making the `if` ambiguous.
            if crop is None:
                continue

            cv2.imshow('hand', crop)

            hand = self._palm.run(hand, crop)
            if hand is None:
                continue

            cv2.imshow('hand final', hand)

            cv2.waitKey(10)

    def _crop(self, img, box):
        """Slice `img` down to the bounding box (x, y, w, h)."""
        return img[box[1]:box[1] + box[3], box[0]:box[0] + box[2]]
Beispiel #4
0
class TestPose():
    """Runs the pose classifier on hand contours extracted from Kinect frames.

    `src` is handed to MultiPoseClassifier (looks like the trained-model
    location — verify against MultiPoseClassifier's constructor).
    """

    def __init__(self, src):
        self._src = src
        self._kinect = Kinect()
        self._body = BodyDetector()
        self._hand = HandDetector()
        self._contour = HandContourDetector()
        self._pose = PoseClassifier(MultiPoseClassifier(src))

    def run(self):
        """Classify and print the pose for every frame with a valid contour."""
        for (depth, depth8, rgb) in self._kinect.get_data():
            contour = self._get_hand_contour(depth8, depth, rgb)

            if contour.any():
                self._contour.draw()
                # Parenthesized print: same output, also valid Python 3.
                print(self._pose.run(contour))

            cv2.waitKey(5)

    def _get_hand_contour(self, depth8, depth, rgb):
        """Return the refined hand contour, or an empty array when invalid."""
        body = self._body.run(depth8)
        (hand, _) = self._hand.run(body)
        (cont, box, hc) = self._contour.run(hand)

        if self._contour.not_valid():
            return np.array([])

        # Refine the contour with the RGB frame and raw depth.
        (cont, _, _) = self._contour.run(rgb, True, box, hc, depth)

        return cont
Beispiel #5
0
class TrainPose():
    """Collects Fourier-descriptor samples of a hand pose and pickles a model.

    Parameters
    ----------
    id : label identifying the pose being trained
    nsamples : number of descriptor samples to collect before saving
    dst : path of the output pickle file
    """

    def __init__(self, id, nsamples, dst):
        self._id       = id
        self._nsamples = nsamples
        self._dst      = dst
        self._kinect   = Kinect()
        self._body     = BodyDetector()
        self._hand     = HandDetector()
        self._contour  = HandContourDetector()
        self._fdesc    = FourierDescriptors()
        self._train    = []

    def run(self):
        """Grab frames until `nsamples` descriptors are collected, then save."""
        warmup = True
        for (depth8, depth, rgb) in self._kinect.get_data():
            contour = self._get_hand_contour(depth8, depth, rgb)
            if not contour:
                continue

            self._contour.draw()

            # Skip frames until the user presses GO, so the hand can be
            # positioned before sampling starts.
            if warmup:
                key = cv2.waitKey(5)
                if key == GO:
                    warmup = False
                continue

            fd = self._fdesc.run(contour)
            self._train.append(fd)

            if len(self._train) == self._nsamples:
                self._save()
                break

            cv2.waitKey(5)

    def _get_hand_contour(self, depth8, depth, rgb):
        """Return the refined hand contour for a frame, or [] when invalid."""
        body = self._body.run(depth8)
        (hand, _) = self._hand.run(body)
        (cont, box, hc) = self._contour.run(hand)

        if self._contour.not_valid():
            return []

        (cont, _, _) = self._contour.run(rgb, True, box, hc, depth)

        return cont

    def _save(self):
        """Fit a covariance model on the collected samples and pickle it."""
        data = np.array(self._train)
        # Reuse `data` instead of rebuilding the same array a second time.
        model = EmpiricalCovariance().fit(data)
        output = {'id': self._id, 'data': data, 'model': model}
        # Context manager closes the file deterministically (the original
        # leaked the handle returned by open()).
        with open(self._dst, 'wb') as f:
            pickle.dump(output, f)
Beispiel #6
0
class TrainPose():
    """Samples Fourier descriptors of a hand pose and saves a fitted model.

    Parameters
    ----------
    id : label of the pose being trained
    nsamples : number of samples to gather before saving
    dst : output pickle path
    """

    def __init__(self, id, nsamples, dst):
        self._id = id
        self._nsamples = nsamples
        self._dst = dst
        self._kinect = Kinect()
        self._body = BodyDetector()
        self._hand = HandDetector()
        self._contour = HandContourDetector()
        self._fdesc = FourierDescriptors()
        self._train = []

    def run(self):
        """Collect descriptors from live frames; save once enough are seen."""
        warmup = True
        for (depth8, depth, rgb) in self._kinect.get_data():
            contour = self._get_hand_contour(depth8, depth, rgb)
            if not contour:
                continue

            self._contour.draw()

            # Wait for the GO key before recording, so the user can get
            # the hand into position first.
            if warmup:
                key = cv2.waitKey(5)
                if key == GO:
                    warmup = False
                continue

            fd = self._fdesc.run(contour)
            self._train.append(fd)

            if len(self._train) == self._nsamples:
                self._save()
                break

            cv2.waitKey(5)

    def _get_hand_contour(self, depth8, depth, rgb):
        """Return the refined hand contour for a frame, or [] when invalid."""
        body = self._body.run(depth8)
        (hand, _) = self._hand.run(body)
        (cont, box, hc) = self._contour.run(hand)

        if self._contour.not_valid():
            return []

        (cont, _, _) = self._contour.run(rgb, True, box, hc, depth)

        return cont

    def _save(self):
        """Fit a covariance model to the samples and pickle the result."""
        data = np.array(self._train)
        # Fit on `data` directly instead of converting the list twice.
        model = EmpiricalCovariance().fit(data)
        output = {'id': self._id, 'data': data, 'model': model}
        # `with` closes the file; the original left the handle open.
        with open(self._dst, 'wb') as f:
            pickle.dump(output, f)
Beispiel #7
0
class TrainPose():
    """Live evaluation of Gabor-descriptor features against a pickled SVM.

    Despite the name, `run()` currently only predicts with a pre-trained
    model loaded from 'svm.pck'; `_save()` is the training-data writer.
    """

    def __init__(self, id, nsamples, dst):
        self._id       = id
        self._nsamples = nsamples
        self._dst      = dst
        self._kinect   = Kinect()
        self._body     = BodyDetector()
        self._hand     = HandDetector()
        self._contour  = HandContourDetector()
        self._feature  = GaborDescriptors(4, 4)

    def run(self):
        """Predict and print the pose of the cropped hand in each frame."""
        # Close the pickle file promptly instead of leaking the handle.
        # NOTE: pickle.load on an untrusted file can execute arbitrary code;
        # 'svm.pck' is assumed to be locally produced.
        with open('svm.pck', 'rb') as f:
            model = pickle.load(f)

        for (depth, depth8, rgb) in self._kinect.get_data():
            body = self._body.run(depth8)
            (hand, _) = self._hand.run(body)
            (cont, box, crop) = self._contour.run(hand)

            # Crop the hand region: box = (x, y, w, h).
            hand = hand[box[1]:box[1] + box[3], box[0]:box[0] + box[2]]

            cv2.imshow('hand', hand)
            cv2.waitKey(2)

            feature = self._feature.run(hand)
            # Parenthesized print: same output, also valid Python 3.
            print(model.predict(feature))

    def _save(self, train):
        """Pickle the collected feature vectors with their class labels."""
        data = np.array(train)
        # np.int was removed in NumPy 1.24; the builtin int is equivalent.
        labels = self._id * np.ones(len(train), int)
        output = {'labels': labels, 'data': data}
        # Close the output file deterministically.
        with open(self._dst, 'wb') as f:
            pickle.dump(output, f)
Beispiel #8
0
            i += 1
        return (self._interval[0], self._interval[0] + i)

    def _normalize(self, x):
        xn = x / x.sum()
        return xn

    def show(self):
        """Display the depth image and body mask side by side, if both exist.

        The window title encodes the current threshold interval.
        """
        if self._body.any() and self._depth.any():
            img = np.hstack((self._depth, self._body))
            # `title` instead of `str`: don't shadow the builtin.
            title = 'th1=%d th2=%d' % (self._interval[0], self._interval[1])
            cv2.imshow(title, img)


if __name__ == '__main__':
    from kinect import Kinect

    # Manual smoke test: run the body detector on live depth frames
    # until the Kinect signals a stop.
    kinect = Kinect()
    body = BodyDetector()

    for (d, _) in kinect.get_data():
        body.run(d)

        if kinect.stop(kinect.wait()):
            break
Beispiel #9
0
        return (self._interval[0], self._interval[0] + i)

    def _normalize(self, x):
        xn = x / x.sum()
        return xn

    def show(self):
        """Show depth image and body mask in one window, when both are set.

        The threshold interval is embedded in the window title.
        """
        if self._body.any() and self._depth.any():
            img = np.hstack((self._depth, self._body))
            # Renamed from `str`, which shadowed the builtin.
            title = 'th1=%d th2=%d' % (self._interval[0], self._interval[1])
            cv2.imshow(title, img)


if __name__ == '__main__':
    from kinect import Kinect

    # Quick visual check of BodyDetector against a live Kinect stream.
    kinect = Kinect()
    body = BodyDetector()

    for (d, _) in kinect.get_data():
        body.run(d)

        # Exit when the Kinect wrapper reports a stop request.
        if kinect.stop(kinect.wait()):
            break
Beispiel #10
0
from kinect import Kinect
from body import BodyDetector
from hand import HandDetector, HandContourDetector
from pose import PoseClassifier, OpenCloseClassifier, MultiPoseClassifier


# Processing pipeline: Kinect frames -> body mask -> hand -> contour -> pose.
kinect  = Kinect()
body    = BodyDetector()
hand    = HandDetector()
contour = HandContourDetector()
#pose   = PoseClassifier(OpenCloseClassifier())
pose    = PoseClassifier(MultiPoseClassifier())


for (depth, depth8, rgb) in kinect.get_data():

    b = body.run(depth8)
    (h, _) = hand.run(b)

    #cv2.imshow('hand', h)

    (ccc, box, hc) = contour.run(h)

    if len(ccc) < 100:
        continue

    (ccc, _, _) = contour.run(rgb, True, box, hc, depth)

    p = pose.run(ccc)
    if p == -1:
Beispiel #11
0
from kinect import Kinect
from body import BodyDetector
from hand import HandDetector, HandContourDetector
from pose import PoseClassifier, OpenCloseClassifier, MultiPoseClassifier


# Pipeline setup: frames flow Kinect -> body -> hand -> contour -> pose.
kinect = Kinect()
body = BodyDetector()
hand = HandDetector()
contour = HandContourDetector()
# pose   = PoseClassifier(OpenCloseClassifier())
pose = PoseClassifier(MultiPoseClassifier())


for (depth, depth8, rgb) in kinect.get_data():

    b = body.run(depth8)
    (h, _) = hand.run(b)

    # cv2.imshow('hand', h)

    (ccc, box, hc) = contour.run(h)

    if len(ccc) < 100:
        continue

    (ccc, _, _) = contour.run(rgb, True, box, hc, depth)

    p = pose.run(ccc)
    if p == -1: