Example #1
    def start(self):
        """
        Start device
        :return: None
        """
        self.ctx = openni.Context()
        self.ctx.init()

        # Create a depth generator
        self.depth = openni.DepthGenerator()
        self.depth.create(self.ctx)

        # Set it to VGA maps at 30 FPS
        self.depth.set_resolution_preset(openni.RES_VGA)
        self.depth.fps = 30

        # Create a color generator
        self.color = openni.ImageGenerator()
        self.color.create(self.ctx)

        # Set it to VGA maps at 30 FPS
        self.color.set_resolution_preset(openni.RES_VGA)
        self.color.fps = 30

        # Start generating
        self.ctx.start_generating_all()
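
For symmetry with start(), a stop() counterpart might look like the sketch below. It assumes the Python binding exposes stop_generating_all() and shutdown() on Context, mirroring the underlying OpenNI C API; treat both calls as assumptions and check your binding.

    def stop(self):
        """
        Stop device
        :return: None
        """
        # Assumed to mirror the OpenNI C API; verify these exist in your binding
        self.ctx.stop_generating_all()
        self.ctx.shutdown()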
Example #2
    def __init__(self, dir_pub):

        self.ctx_zmq = zmq.Context()
        self.socket = self.ctx_zmq.socket(zmq.PUB)
        self.socket.bind(dir_pub)

        self.context = openni.Context()
        self.context.init()
        self.depth_generator = openni.DepthGenerator()
        self.depth_generator.create(self.context)
        self.depth_generator.set_resolution_preset(openni.RES_VGA)
        self.depth_generator.fps = 30

        self.gesture_generator = openni.GestureGenerator()
        self.gesture_generator.create(self.context)
        self.gesture_generator.add_gesture('Wave')

        self.hands_generator = openni.HandsGenerator()
        self.hands_generator.create(self.context)

        self.gesture_generator.register_gesture_cb(self.gesture_detected,
                                                   self.gesture_progress)
        self.hands_generator.register_hand_cb(self.create, self.update,
                                              self.destroy)
        self.context.start_generating_all()

    def update_frame(self):
        self.context.wait_any_update_all()
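
The constructor registers gesture and hand callbacks (gesture_detected, gesture_progress, create, update, destroy) without showing them. A minimal sketch follows, assuming the callback argument lists commonly used in PyOpenNI examples; the argument names and the JSON payload are illustrative, not taken from the snippet above.

    # Callback sketch; argument lists are an assumption based on typical
    # PyOpenNI gesture/hand examples.
    def gesture_detected(self, src, gesture, id, end_point):
        print('Detected gesture:', gesture)
        self.hands_generator.start_tracking(end_point)

    def gesture_progress(self, src, gesture, point, progress):
        pass

    def create(self, src, id, pos, time):
        print('Hand created:', id, pos)

    def update(self, src, id, pos, time):
        # Publish the tracked hand position over the ZeroMQ PUB socket
        self.socket.send_json({'id': id, 'pos': list(pos)})

    def destroy(self, src, id, time):
        print('Hand destroyed:', id)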
Example #3
    def __init__(self, game):
        self.game = game
        self.ctx = openni.Context()
        self.ctx.init()

        self.user = openni.UserGenerator()
        self.user.create(self.ctx)

        self.depth_generator = openni.DepthGenerator()
        self.depth_generator.create(self.ctx)
        self.depth_generator.set_resolution_preset(openni.RES_VGA)
        self.depth_generator.fps = 30

        self.image_generator = openni.ImageGenerator()
        self.image_generator.create(self.ctx)
        self.image_generator.set_resolution_preset(openni.RES_VGA)
        self.depth_generator.alternative_view_point_cap.set_view_point(
            self.image_generator)

        self.skel_cap = self.user.skeleton_cap
        self.pose_cap = self.user.pose_detection_cap

        # Define joints we want to track
        self.joints = [
            'SKEL_HEAD', 'SKEL_LEFT_FOOT', 'SKEL_RIGHT_SHOULDER',
            'SKEL_LEFT_HAND', 'SKEL_NECK', 'SKEL_RIGHT_FOOT', 'SKEL_LEFT_HIP',
            'SKEL_RIGHT_HAND', 'SKEL_TORSO', 'SKEL_LEFT_ELBOW',
            'SKEL_LEFT_KNEE', 'SKEL_RIGHT_HIP', 'SKEL_LEFT_SHOULDER',
            'SKEL_RIGHT_ELBOW', 'SKEL_RIGHT_KNEE'
        ]
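
Once a user is calibrated and tracked, the joints listed above can be read through the skeleton capability. The helper below is a sketch: it assumes skel_cap.get_joint_position(user, joint) returns an object with .point and .confidence, as in common PyOpenNI examples, so verify those accessors against your binding.

    def get_joint_positions(self, user_id):
        # Hypothetical helper: look up each tracked joint for one user
        positions = {}
        for name in self.joints:
            joint = getattr(openni, name)
            # Assumed accessor; returns position plus a confidence value
            pos = self.skel_cap.get_joint_position(user_id, joint)
            if pos.confidence > 0.5:
                positions[name] = pos.point
        return positions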
Example #4
    def __init__(self, game):

        self.context = openni.Context()
        self.context.init()
        self.depth_generator = openni.DepthGenerator()
        self.depth_generator.create(self.context)
        self.depth_generator.set_resolution_preset(openni.RES_VGA)
        self.depth_generator.fps = 30

        self.image_generator = openni.ImageGenerator()
        self.image_generator.create(self.context)
        self.image_generator.set_resolution_preset(openni.RES_VGA)
        
        self.gesture_generator = openni.GestureGenerator()
        self.gesture_generator.create(self.context)
        self.gesture_generator.add_gesture('Wave')
        
        self.hands_generator = openni.HandsGenerator()
        self.hands_generator.create(self.context)

        self.gesture_generator.register_gesture_cb(self.gesture_detected,
                                                   self.gesture_progress)
        self.hands_generator.register_hand_cb(self.create, self.update,
                                              self.destroy)

        self.game = game 
Example #5
import cv2
import numpy as np
import openni

# Initialise OpenNI
context = openni.Context()
context.init()

# Create a depth generator to access the depth stream
depth = openni.DepthGenerator()
depth.create(context)
depth.set_resolution_preset(openni.RES_VGA)
depth.fps = 30

# Start Kinect
context.start_generating_all()
context.wait_any_update_all()

# Create an array from the raw 8-bit depth map bytes
frame = np.frombuffer(depth.get_raw_depth_map_8(), dtype=np.uint8).reshape(480, 640)

# Render in OpenCV (cv2.waitKey is needed for the window to actually refresh)
cv2.imshow("image", frame)
cv2.waitKey(0)