import cv2

from kinect import Kinect
from body import BodyDetector
from hand import HandDetector, HandContourDetector, HandOtsu  # HandOtsu location assumed
from palm import PalmDetector  # module location assumed


class TestHand():

    def __init__(self):
        self._kinect = Kinect()
        self._body = BodyDetector()
        self._hand = HandDetector(HandOtsu())
        self._contour = HandContourDetector()
        self._palm = PalmDetector()

    def run(self):
        for (depth, depth8, rgb) in self._kinect.get_data():
            cv2.imshow('rgb', cv2.cvtColor(rgb, cv2.COLOR_BGR2RGB))
            cv2.imshow('depth', depth8)
            hand, mask = self._hand.run(depth, depth8)
            (_, _, crop) = self._contour.run(mask)
            if crop is None:  # fix: compare against None with `is`, not `==`
                continue
            cv2.imshow('hand', crop)
            hand = self._palm.run(hand, crop)
            if hand is None:
                continue
            cv2.imshow('hand final', hand)
            cv2.waitKey(10)

    def _crop(self, img, box):
        # box is (x, y, w, h); NumPy indexes rows (y) first, then columns (x)
        return img[box[1]:box[1] + box[3], box[0]:box[0] + box[2]]
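# Hypothetical entry point (not in the original file): a minimal sketch to
# exercise TestHand live, assuming a Kinect is attached and the detector
# modules above are importable.
if __name__ == '__main__':
    TestHand().run()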
import cv2
import numpy as np

from kinect import Kinect
from body import BodyDetector
from hand import HandDetector, HandContourDetector
from pose import PoseClassifier, MultiPoseClassifier


class TestPose():

    def __init__(self, src):
        self._src = src
        self._kinect = Kinect()
        self._body = BodyDetector()
        self._hand = HandDetector()
        self._contour = HandContourDetector()
        self._pose = PoseClassifier(MultiPoseClassifier(src))

    def run(self):
        for (depth, depth8, rgb) in self._kinect.get_data():
            contour = self._get_hand_contour(depth8, depth, rgb)
            if contour.any():
                self._contour.draw()
                print(self._pose.run(contour))  # fix: print as a function
            cv2.waitKey(5)

    def _get_hand_contour(self, depth8, depth, rgb):
        body = self._body.run(depth8)
        (hand, _) = self._hand.run(body)
        (cont, box, hc) = self._contour.run(hand)
        if self._contour.not_valid():
            return np.array([])
        # second pass: refine the contour on the RGB frame, seeded with the
        # box and contour found in the depth image
        (cont, _, _) = self._contour.run(rgb, True, box, hc, depth)
        return cont
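# Hypothetical wiring (not in the original file): MultiPoseClassifier takes
# a `src` argument whose exact format is not shown; judging from TrainPose
# below, a plausible value is the path(s) of the pickled per-pose models.
# The filename here is an assumption.
if __name__ == '__main__':
    TestPose('pose_models.pck').run()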
import pickle

import cv2
import numpy as np
from sklearn.covariance import EmpiricalCovariance

from config import GO  # assumed: GO is the "start capture" keycode defined in config
from kinect import Kinect
from body import BodyDetector
from hand import HandDetector, HandContourDetector
from descriptors import FourierDescriptors  # module location assumed


class TrainPose():

    def __init__(self, id, nsamples, dst):
        self._id = id
        self._nsamples = nsamples
        self._dst = dst
        self._kinect = Kinect()
        self._body = BodyDetector()
        self._hand = HandDetector()
        self._contour = HandContourDetector()
        self._fdesc = FourierDescriptors()
        self._train = []

    def run(self):
        warmup = True
        # fix: every other script unpacks (depth, depth8, rgb); the original
        # unpacked (depth8, depth, rgb) here, silently swapping the two maps
        for (depth, depth8, rgb) in self._kinect.get_data():
            contour = self._get_hand_contour(depth8, depth, rgb)
            if not contour.size:  # fix: truth-testing a NumPy array raises
                continue
            self._contour.draw()
            if warmup:
                # collect nothing until the GO key is pressed
                key = cv2.waitKey(5)
                if key == GO:
                    warmup = False
                continue
            fd = self._fdesc.run(contour)
            self._train.append(fd)
            if len(self._train) == self._nsamples:
                self._save()
                break
            cv2.waitKey(5)

    def _get_hand_contour(self, depth8, depth, rgb):
        body = self._body.run(depth8)
        (hand, _) = self._hand.run(body)
        (cont, box, hc) = self._contour.run(hand)
        if self._contour.not_valid():
            return np.array([])  # fix: return an array so callers can test .size
        (cont, _, _) = self._contour.run(rgb, True, box, hc, depth)
        return cont

    def _save(self):
        data = np.array(self._train)
        # fit a Gaussian model to this pose's descriptors; classification can
        # later use distances to each pose's model
        model = EmpiricalCovariance().fit(data)
        output = {'id': self._id, 'data': data, 'model': model}
        pickle.dump(output, open(self._dst, 'wb'))
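# Sketch (assumed usage, not in the original file) of consuming the pickle
# written by _save() above: sklearn's EmpiricalCovariance exposes
# mahalanobis(), so a new Fourier descriptor can be scored against each
# pose model and the nearest one wins. The function name is hypothetical.
def score_descriptor(model_path, fd):
    """Return (pose id, squared Mahalanobis distance) for one descriptor."""
    saved = pickle.load(open(model_path, 'rb'))
    dist = saved['model'].mahalanobis(np.atleast_2d(fd))[0]
    return saved['id'], dist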
import pickle

import cv2
import numpy as np

from kinect import Kinect
from body import BodyDetector
from hand import HandDetector, HandContourDetector
from descriptors import GaborDescriptors  # module location assumed


class TrainPose():

    def __init__(self, id, nsamples, dst):
        self._id = id
        self._nsamples = nsamples
        self._dst = dst
        self._kinect = Kinect()
        self._body = BodyDetector()
        self._hand = HandDetector()
        self._contour = HandContourDetector()
        self._feature = GaborDescriptors(4, 4)

    def run(self):
        warmup = True
        train = []
        model = pickle.load(open('svm.pck', 'rb'))
        for (depth, depth8, rgb) in self._kinect.get_data():
            body = self._body.run(depth8)
            (hand, _) = self._hand.run(body)
            (cont, box, crop) = self._contour.run(hand)
            # crop the hand out of the frame: box is (x, y, w, h)
            hand = hand[box[1]:box[1] + box[3], box[0]:box[0] + box[2]]
            cv2.imshow('hand', hand)
            key = cv2.waitKey(2)
            # training path, disabled while testing the pickled SVM:
            # if warmup:
            #     if key == GO:
            #         warmup = False
            #     continue
            # if key != 97:  # 97 == ord('a')
            #     continue
            feature = self._feature.run(hand)
            print(model.predict(feature))  # fix: print as a function
            # train.append(feature)
            # if len(train) == self._nsamples:
            #     self._save(train)
            #     break
            # cv2.waitKey(2)

    def _save(self, train):
        data = np.array(train)
        # fix: np.int was removed from NumPy; plain int is equivalent here
        labels = self._id * np.ones(len(train), int)
        output = {'labels': labels, 'data': data}
        pickle.dump(output, open(self._dst, 'wb'))
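# The run() loop above expects a pre-trained 'svm.pck', but the original
# file never shows how it is produced. A minimal sketch, assuming each pose
# was captured to its own pickle by _save(); the function name and file
# list are hypothetical.
from sklearn.svm import SVC


def train_svm(pose_files, dst='svm.pck'):
    data, labels = [], []
    for path in pose_files:
        saved = pickle.load(open(path, 'rb'))
        data.append(saved['data'])
        labels.append(saved['labels'])
    model = SVC().fit(np.vstack(data), np.concatenate(labels))
    pickle.dump(model, open(dst, 'wb'))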
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import cv2
import numpy as np

import config
from kinect import Kinect
from body import BodyDetector
from hand import HandDetector, HandContourDetector
from pose import PoseClassifier, OpenCloseClassifier, MultiPoseClassifier

kinect = Kinect()
body = BodyDetector()
hand = HandDetector()
contour = HandContourDetector()
# pose = PoseClassifier(OpenCloseClassifier())
pose = PoseClassifier(MultiPoseClassifier())

for (depth, depth8, rgb) in kinect.get_data():
    b = body.run(depth8)
    (h, _) = hand.run(b)
    # cv2.imshow('hand', h)
    (ccc, box, hc) = contour.run(h)
    if len(ccc) < 100:
        continue  # assumed: skip contours too small to be a hand (the file is truncated here)
import cv2
import argparse
import chainer

from entity import params
from pose import PoseDetector, draw_person_pose
from hand import HandDetector, draw_hand_keypoints

# fix: chainer.using_config() returns a context manager, so calling it bare
# has no effect; set the flag on the global config instead
chainer.config.enable_backprop = False

if __name__ == '__main__':
    # load models
    pose_detector = PoseDetector("posenet", "models/coco_posenet.npz")
    hand_detector = HandDetector("handnet", "models/handnet.npz")

    # read image
    img = cv2.imread('data/dinner.png')

    # inference
    print("Estimating pose...")
    person_pose_array, _ = pose_detector(img)
    res_img = cv2.addWeighted(img, 0.6, draw_person_pose(img, person_pose_array), 0.4, 0)

    # each person detected
    for person_pose in person_pose_array:
        unit_length = pose_detector.get_unit_length(person_pose)

        # hands estimation
        print("Estimating hands keypoints...")