Example #1
    def __init__(self):

        pygame.init()
        # Used to manage how fast the screen updates
        self._clock = pygame.time.Clock()
        # Set the width and height of the screen [width, height]
        self._infoObject = pygame.display.Info()
        self._screen = pygame.display.set_mode((self._infoObject.current_w >> 1, self._infoObject.current_h >> 1),
                                               pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE, 32)
        pygame.display.set_caption("Gesture Recgnization by Kinect")
        # Loop until the user clicks the close button.
        self._done = False
        # Kinect runtime object, we want only color and body frames
        self._kinect = PyKinectRuntime.PyKinectRuntime(
            PyKinectV2.FrameSourceTypes_Color | PyKinectV2.FrameSourceTypes_Body)
        # back buffer surface for getting Kinect color frames, 32bit color, width and height equal to the Kinect color frame size
        self._frame_surface = pygame.Surface(
            (self._kinect.color_frame_desc.Width, self._kinect.color_frame_desc.Height), 0, 32)
        # here we will store skeleton data
        self._bodies = None
Example #2
    def __init__(self):

        pygame.init()

        self._autoTraining = True

        # Used to manage how fast the screen updates
        self._clock = pygame.time.Clock()

        # Set the width and height of the screen [width, height]
        self._infoObject = pygame.display.Info()
        self._screen = pygame.display.set_mode(
            (self._infoObject.current_w >> 1, self._infoObject.current_h >> 1),
            pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE, 32)

        pygame.display.set_caption("Kinect for Windows v2 Body Game")

        # States
        self._done = False
        self._listening = False
        self._recording = False

        # Kinect runtime object, we want only color and body frames
        self._kinect = PyKinectRuntime.PyKinectRuntime(
            PyKinectV2.FrameSourceTypes_Color
            | PyKinectV2.FrameSourceTypes_Body)

        # back buffer surface for getting Kinect color frames, 32bit color, width and height equal to the Kinect color frame size
        self._frame_surface = pygame.Surface(
            (self._kinect.color_frame_desc.Width,
             self._kinect.color_frame_desc.Height), 0, 32)

        # here we will store skeleton data
        self._bodies = None

        # here we will store the regression models (neural nets)
        self._neuralNets = []

        self._fileHandler = FileHandler(None, 'test.csv', [])
Example #3
    def __init__(self):
        pygame.init()

        self.screenWidth = 1920
        self.screenHeight = 1080

        self.delta = [0] * 2
        self.flag = False

        self.prevx = [0] * 2
        self.curx = [0] * 2
        self.prevHand = [0] * 2
        self.curHand = [0] * 2
        self.heightState = [0] * 2

        self.gameover = False

        # Used to manage how fast the screen updates
        self.clock = pygame.time.Clock()

        # Set the width and height of the window [width/2, height/2]
        self.screen = pygame.display.set_mode(
            (960, 540), pygame.HWSURFACE | pygame.DOUBLEBUF, 32)

        # Loop until the user clicks the close button.
        self.done = False

        # Kinect runtime object, we want color and body frames
        self.kinect = PyKinectRuntime.PyKinectRuntime(
            PyKinectV2.FrameSourceTypes_Color
            | PyKinectV2.FrameSourceTypes_Body)

        # back buffer surface for getting Kinect color frames, 32bit color, width and height equal to the Kinect color frame size
        self.frameSurface = pygame.Surface(
            (self.kinect.color_frame_desc.Width,
             self.kinect.color_frame_desc.Height), 0, 32)

        # here we will store skeleton data
        self.bodies = None
Example #4
    def __init__(self):
        pygame.init()

        # Used to manage how fast the screen updates
        self._clock = pygame.time.Clock()

        # Set the width and height of the screen [width, height]
        self._infoObject = pygame.display.Info()
        self._screen = pygame.display.set_mode((self._infoObject.current_w >> 1, self._infoObject.current_h >> 1), 
                                               pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 32)

        pygame.display.set_caption("Guess your Age!")

        # Loop until the user clicks the close button.
        self._done = False

        # Kinect runtime object, we want only color and body frames 
        if PyKinectRuntime:
            self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color | PyKinectV2.FrameSourceTypes_Body)
            frame_dimension = self._kinect.color_frame_desc.Width, self._kinect.color_frame_desc.Height
        else:
            self._kinect = None
            frame_dimension = 800, 600

        # back buffer surface for getting Kinect color frames, 32bit color, width and height equal to the Kinect color frame size
        self._frame_surface = pygame.Surface(frame_dimension, 0, 32)

        # here we will store skeleton data 
        self._bodies = None
        self._stored_bodies = {}

        self._faces = []
        self._face_bodies = []

        self._update_oxford = 0
        self.python_logo_image = pygame.image.load('pylogo.png')
        self.msft_logo_image = pygame.image.load('microsoftlogo.png')

        self.bg_color = pygame.Color(55, 117, 169)
Example #5
    def InitKinect(self):
        '''
        Function that starts or stops the Kinect sensor.
        '''
        # Connect Kinect.
        if self.StartEnd == 1:
            # Configure the parameters needed to use the Kinect sensor.
            pygame.init()
            self.Clock = pygame.time.Clock()
            self.Kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color | PyKinectV2.FrameSourceTypes_Body)
            self.FrameSurface = pygame.Surface((self.Kinect.color_frame_desc.Width, self.Kinect.color_frame_desc.Height), 0, 32)
    
            self.Bodies = None                                                  # Variable used to store the detected bodies.

            self.Btn_ConectKinect.setText("Disconnect Kinect")                  # Change the text of the GUI button.

            self.StartEnd = 0                                                   # Change the flag's value so the next call stops the Kinect sensor.

            self.HideEmojies()                                                  # Hide the representations of the emotions in the GUI.

            self.Timer.start(1)                                                 # Start the timer interrupt that displays the image captured by the Kinect sensor.
        
        # Disconnect Kinect.          
        else:
            try:                
                self.Btn_ConectKinect.setText("Conect Kinect")                  # Change the text in the GUI button.

                self.StartEnd = 1                                               # Change the value flag's value to start the Kinect Sensor.

                self.Timer.stop()                                               # Deactivate the interruption that show the image get with the Kinect Sensor.
                
                self.Kinect.close()                         
                
                pygame.quit()
                
                self.ShowEmojies()                                              # Show the representations of the emotions in the GUI.
            except:
                pass
Example #6
    def __init__(self):
        #Initialize Window & Kinect2
        print("Initializing Window & Kinect 2")
        pygame.init()
        self._clock = pygame.time.Clock()
        self._font = pygame.font.Font(None, 30)
        self._done = False
        self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color | PyKinectV2.FrameSourceTypes_Depth)
        self._frame_surface = pygame.Surface((self._kinect.color_frame_desc.Width, self._kinect.color_frame_desc.Height), 0, 32)
        self._infoObject = pygame.display.Info()
        self._screen = pygame.display.set_mode((self._kinect.color_frame_desc.Width, self._kinect.color_frame_desc.Height), pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 32)
        pygame.display.set_caption("Depth Perception Mask R-CNN Demo " + str(self._kinect.color_frame_desc.Width) + "x" + str(self._kinect.color_frame_desc.Height))
        print("Task Completed")

        #Initialize CNN & Coco Model
        print("Initializing TensorFlow & Coco Model")
        class InferenceConfig(CocoConfig):
            GPU_COUNT = 1
            IMAGES_PER_GPU = 1
            DETECTION_MIN_CONFIDENCE = 0
        config = InferenceConfig()
        self._model = modellib.MaskRCNN(mode="inference", config=config, model_dir=DEFAULT_LOGS_DIR)
        self._model.load_weights(COCO_MODEL_PATH, by_name=True)
Example #7
def get_depth_and_color_frame():
    """Fetch both color and depth image from the Kinect V2

    :return: (1D array, 1D array) (depth_frame, color_frame)
    """
    kinect = PyKinectRuntime.PyKinectRuntime(
        PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Color)
    color_frame = []
    depth_frame = []
    has_depth = False
    has_color = False
    while True:
        if kinect.has_new_depth_frame() and not has_depth:
            depth_frame = kinect.get_last_depth_frame()
            has_depth = True
        if kinect.has_new_color_frame() and not has_color:
            color_frame = kinect.get_last_color_frame()
            has_color = True

        if has_depth and has_color:
            break

    return depth_frame, color_frame
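
A minimal usage sketch (assuming the default Kinect v2 frame sizes of 424x512 for depth and 1080x1920 with 4 channels for color, as used in the other examples on this page):

import numpy as np

depth_frame, color_frame = get_depth_and_color_frame()
# Reshape the flat arrays returned above into images (sizes are the assumed
# defaults, not queried from the sensor).
depth_image = np.asarray(depth_frame).reshape((424, 512))
color_image = np.asarray(color_frame).reshape((1080, 1920, 4))  # 4 channels per pixel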
Example #8
    def __init__(self):
        pygame.init()

        # Used to manage how fast the screen updates
        self._clock = pygame.time.Clock()

        # Set the width and height of the screen [width, height]
        self._infoObject = pygame.display.Info()
        self._screen = pygame.display.set_mode(
            (self._infoObject.current_w >> 1, self._infoObject.current_h >> 1),
            pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE, 32)

        pygame.display.set_caption("Kinect Body detection")

        # Loop until the user clicks the close button.
        self._done = False
        self._kinect = PyKinectRuntime.PyKinectRuntime(
            PyKinectV2.FrameSourceTypes_Color
            | PyKinectV2.FrameSourceTypes_Body
            | PyKinectV2.FrameSourceTypes_Depth
            | PyKinectV2.FrameSourceTypes_BodyIndex)
        # back buffer surface for getting Kinect color frames, 32bit color, width and height equal to the Kinect color frame size
        self._frame_surface = pygame.Surface(
            (self._kinect.color_frame_desc.Width,
             self._kinect.color_frame_desc.Height), 0, 32)
        # here we will store skeleton data
        self._bodies = None

        self._text_timer = [30, 30, 30,
                            30]  # timers, [push, draw, backup1, backup2]

        time.sleep(3)

        if self._kinect.has_new_color_frame():
            print('extracting all information....')
        else:
            print('failed to extract.....')
Example #9
    def __init__(self):
        pygame.init()

        self.screen_width = 1920
        self.screen_height = 1080

        # Used to manage how fast the screen updates
        self._clock = pygame.time.Clock()

        # Set the width and height of the window [width/2, height/2]
        self._screen = pygame.display.set_mode(
                        (960,540),
                        pygame.HWSURFACE|pygame.DOUBLEBUF, 32)

        # Loop until the user clicks the close button.
        self._done = False

        # Kinect runtime object, we want color and body frames
        self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color | PyKinectV2.FrameSourceTypes_Body)

        self._bodies = None
        self._frame_surface = pygame.Surface(
            (self._kinect.color_frame_desc.Width,
             self._kinect.color_frame_desc.Height),
             0,
             32)

        #Model Code Start
        self.model = Model(self._frame_surface,
                    [
                    #Cube(0, 0, 0, 200, self._frame_surface),
                    #Cube(300, 150, 0, 100, self._frame_surface),
                    # shirt(0, 0, 0, self._frame_surface,
                    #     [(43, 156, 54),(200,0,0),(61, 187, 198)])
                    ]
                    )
Example #10
    def __init__(self, resolution_mode=1.0):
        self.resolution_mode = resolution_mode

        self._done = False

        # Kinect runtime object, we want color, body and depth frames
        self._kinect = PyKinectRuntime.PyKinectRuntime(
            PyKinectV2.FrameSourceTypes_Color
            | PyKinectV2.FrameSourceTypes_Body
            | PyKinectV2.FrameSourceTypes_Depth)

        # here we will store skeleton data
        self._bodies = None
        self.body_tracked = False
        self.joint_points = np.array([])
        self.joint_points3D = np.array([])
        self.joint_points_RGB = np.array([])
        self.joint_state = np.array([])

        self._frameRGB = None
        self._frameDepth = None
        self._frameDepthQuantized = None
        self._frameSkeleton = None
        self.frameNum = 0
Example #11
    def __init__(self):
        global _key_press_rec_iter, _key_press_rec_label_uni, _key_press_rec_label_bil, _key_press_rec,recording_start_time
        # pygame.init()
        # glutInit()
        glfw.init()
        # glutInitDisplayMode(GLUT_DOUBLE )

        # Set the width and height of the screen [width, height]
        # self._infoObject = pygame.display.Info()
        # glutInitWindowSize((glutGet(GLUT_SCREEN_WIDTH) >> 1),(glutGet(GLUT_SCREEN_HEIGHT) >> 1))
        # glutInitWindowPosition(0,0)
        self._screen = glfw.create_window(800, 600, "Kinect for Windows v3 Body Game", None, None)
        glfw.make_context_current(self._screen)
        # glutMainLoop()
        # self._screen = pygame.display.set_mode((self._infoObject.current_w >> 1, self._infoObject.current_h >> 1), HWSURFACE | DOUBLEBUF | OPENGL , 32)

        # pygame.display.set_caption("Kinect for Windows v3 Body Game")

        # Loop until the user clicks the close button.
        self._done = False

        self.now = 0

        # Used to manage how fast the screen updates
        self._clock = glfw.get_time()


        # Kinect runtime object, we want color, infrared, body and depth frames
        self._kinect = PyKinectRuntime.PyKinectRuntime( PyKinectV2.FrameSourceTypes_Color |  PyKinectV2.FrameSourceTypes_Infrared | PyKinectV2.FrameSourceTypes_Body | PyKinectV2.FrameSourceTypes_Depth)

        # back buffer surface for getting Kinect color frames, 32bit color, width and height equal to the Kinect color frame size
        # self._frame_surface = pygame.Surface( (self._kinect.color_frame_desc.Width, self._kinect.color_frame_desc.Height), 0, 32)

        # self._frame_surface = pygame.transform.flip(self._frame_surface,True,False)
        
        self.frames = np.zeros((100,self._kinect.color_frame_desc.Height, self._kinect.color_frame_desc.Width,3), dtype=np.uint8)

        # here we will store skeleton data
        self._bodies = None

        self._joints_with_time = None
        self.path = None
        self._key_press = None
        # self._key_press_rec = [['Baseline/Start_Asana',0]]
        
        # self._key_press_rec_label = ['Start time of asana',"Start time of asana's hold-time","End time of asana's hold-time","End time of asana"]
        # self._cnt = 0
        self._frameno = 0
        self.hot_keys = []
        self._video_frameno = None
        self._timestamps = None
        self._video_color = None
        self._video_depth = None
        self._video_infrared = None

        self._audio = None
        self.MOUSE_BUTTON_DOWN = 1
        self.MOUSE_BUTTON_UP = 0

        self.clicked = False
        self.prev_mouse_state = self.MOUSE_BUTTON_DOWN

        self.isRecording = False

        self.sound_thread = None
        # self.keyboard_thread = None
        self.ir_counter = 0
        self.depth_counter = 0
        self.audio_frames = []
        self.audio_stop_flag = False
        self.audio_is_stopped = False
Example #12
    def __init__(self):
        pygame.init()

        # Used to manage how fast the screen updates
        self._clock = pygame.time.Clock()

        # Loop until the user clicks the close button.
        self._done = False

        # Kinect runtime object, we want only infrared frames
        self._kinect = PyKinectRuntime.PyKinectRuntime(
            PyKinectV2.FrameSourceTypes_Infrared)

        # back buffer surface for getting Kinect infrared frames, 8bit grey, width and height equal to the Kinect infrared frame size
        self._frame_surface = pygame.Surface(
            (self._kinect.infrared_frame_desc.Width,
             self._kinect.infrared_frame_desc.Height), 0, 24)
        # here we will store skeleton data
        self._bodies = None

        # Set the width and height of the screen [width, height]
        self._infoObject = pygame.display.Info()
        self._screen = pygame.display.set_mode(
            (self._kinect.infrared_frame_desc.Width,
             self._kinect.infrared_frame_desc.Height),
            pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE, 32)

        self.target = [100, 100]
        self.turnCheck = False

        pygame.display.set_caption("Kinect for Windows v2 Infrared")

        ## Bluetooth
        target_name = "HC-05"
        target_address = None

        while (target_address is None):
            nearby_devices = bluetooth.discover_devices()
            print(nearby_devices)

            for bdaddr in nearby_devices:
                print(bluetooth.lookup_name(bdaddr))
                if target_name == bluetooth.lookup_name(bdaddr):
                    target_address = bdaddr
                    break

        if target_address is not None:
            print("found target bluetooth device with address ",
                  target_address)
            self.sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)

            print("Trying connection")

            i = 0  # ---- your port range starts here
            maxPort = 3  # ---- your port range ends here
            err = True
            while err == True and i <= maxPort:
                print("Checking Port ", i)
                port = i
                try:

                    self.sock.connect((target_address, port))
                    err = False
                except Exception:
                    ## print the exception if you like
                    i += 1
            if i > maxPort:
                print("Port detection Failed.")
                return

            # print("Trying sending")
            # self.sock.send("1 2 3")
            # print("Finished sending")
        else:
            print("could not find target bluetooth device nearby")
Example #13
 def __init__(self):
     # Initialize Kinect
     self.kinect = PyKinectRuntime.PyKinectRuntime(
         PyKinectV2.FrameSourceTypes_Color | PyKinectV2.FrameSourceTypes_Depth)
     print(type(self.kinect))
Example #14
from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime

# ---- plot graph ------
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D, proj3d
import matplotlib.animation as animation
import time

from ML.joints_25.model_ML import create_2stream_model
import pickle


kinect_obj = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color | PyKinectV2.FrameSourceTypes_Body)
bodies = None


# choose_joints = np.array([4, 21, 9, 10, 11, 5, 6, 7, 17, 18, 19, 13, 14, 15 ]) - 1 # start from 0
# select_column = []
# for i in range(14): # 14 body join( except waist)
#     select_column.append(0 + 3*choose_joints[i]) # select x
#     select_column.append(1 + 3*choose_joints[i]) # select y
#     select_column.append(2 + 3*choose_joints[i]) # select z 

# # bone_list COCO 14 joint from 25
# bone_list = [[7,6], [6,5], [5,1], [1,0], [1,2], [2,3], [3,4],
#              [1,8], [1,11], [8,9], [9,10], [11,12], [12,13]]

# full bonelist 25 joints
Example #15
### Coloured point cloud, joint and joint orientation in 3D using Open3D
########################################################################
import cv2
import numpy as np
import utils_PyKinectV2 as utils
import open3d
from numpy.linalg import inv
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectV2
from pykinect2 import PyKinectRuntime

#############################
### Kinect runtime object ###
#############################
kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Body
                                         | PyKinectV2.FrameSourceTypes_Color
                                         | PyKinectV2.FrameSourceTypes_Depth)

depth_width, depth_height = kinect.depth_frame_desc.Width, kinect.depth_frame_desc.Height  # Default: 512, 424
color_width, color_height = kinect.color_frame_desc.Width, kinect.color_frame_desc.Height  # Default: 1920, 1080

##############################
### User defined variables ###
##############################
depth_scale = 0.001  # Default kinect depth scale where 1 unit = 0.001 m = 1 mm
clipping_distance_in_meters = 1.5  # Set the maximum distance to display the point cloud data
clipping_distance = clipping_distance_in_meters / depth_scale  # Convert dist in mm to unit
width = depth_width
height = depth_height
ppx = 260.166
ppy = 205.197
Example #16
from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
import numpy as np
import cv2
import ctypes
import _ctypes

kinect_ir = PyKinectRuntime.PyKinectRuntime(
    PyKinectV2.FrameSourceTypes_Infrared | PyKinectV2.FrameSourceTypes_Depth)
kinect_color = PyKinectRuntime.PyKinectRuntime(
    PyKinectV2.FrameSourceTypes_Color | PyKinectV2.FrameSourceTypes_Body
    | PyKinectV2.FrameSourceTypes_Depth)
depth_width, depth_height = kinect_ir.depth_frame_desc.Width, kinect_ir.depth_frame_desc.Height


def correction(img):
    a = 1.0
    b = 0.02
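    # Each successive 50-pixel-wide strip below is resized with a slightly larger
    # horizontal scale factor (fx = a + b*i), stretching the image more toward the right.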
    img_1 = img[:, :50].copy()
    img_1 = cv2.resize(img_1, dsize=None, fx=a + b * 1, fy=1.0)
    img_2 = img[:, 50:100].copy()
    img_2 = cv2.resize(img_2, dsize=None, fx=a + b * 2, fy=1.0)
    img_3 = img[:, 100:150].copy()
    img_3 = cv2.resize(img_3, dsize=None, fx=a + b * 3, fy=1.0)
    img_4 = img[:, 150:200].copy()
    img_4 = cv2.resize(img_4, dsize=None, fx=a + b * 4, fy=1.0)
    img_5 = img[:, 200:250].copy()
    img_5 = cv2.resize(img_5, dsize=None, fx=a + b * 5, fy=1.0)
    img_6 = img[:, 250:300].copy()
    img_6 = cv2.resize(img_6, dsize=None, fx=a + b * 6, fy=1.0)
Example #17
    def __init__(self,
                 host,
                 port,
                 camtype="webcam",
                 ID=0,
                 image_name='lena.png',
                 change=True,
                 Debug=True):
        self.host = host
        self.port = port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((self.host, self.port))
        self.comThreads = []
        self.alive = True
        self.RGB0 = []
        self.Depth = []
        self.Body = []
        self.camtype = camtype
        self.ret = False
        self.log = "test"
        self.HDRGB = []
        self.imageName = image_name
        self.change = change
        self.sys_random = random.SystemRandom()
        #Assuming 8bit pic
        self.cnt = 0
        self.trip = 0
        self.Debug = Debug

        #Locks
        self.Lock = threading.Lock()

        if self.Debug:
            self.img = cv2.imread(self.imageName)
            self.ImageT = threading.Thread(target=self.imagechanger)
            self.ImageT.start()
            if self.ImageT.isAlive():
                self.log = "alive"
        if Kinect:
            pygame.init()

            #Used to manage how fast the screen updates
            self._clock = pygame.time.Clock()
            self._done = False
            self._infoObject = pygame.display.Info()
            self._screen = pygame.display.set_mode(
                (self._infoObject.current_w >> 1,
                 self._infoObject.current_h >> 1),
                pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE, 32)

            # Kinect runtime object, we want color, body, depth and infrared frames
            self._kinect = PyKinectRuntime.PyKinectRuntime(
                PyKinectV2.FrameSourceTypes_Color
                | PyKinectV2.FrameSourceTypes_Body
                | PyKinectV2.FrameSourceTypes_Depth
                | PyKinectV2.FrameSourceTypes_Infrared)
            # back buffer surface for getting Kinect color frames, 32bit color, width and height equal to the Kinect color frame size
            self._frame_surface = pygame.Surface(
                (self._kinect.color_frame_desc.Width,
                 self._kinect.color_frame_desc.Height), 0, 32)
            # here we will store skeleton data
            self._bodies = None

        if camtype == "webcam":

            self.cap = cv2.VideoCapture(ID)
Example #18
    def __init__(self, save_prefix, save_on_record):
        """
    Record color, depth and body index stream from Kinect v2. The performance
    is bad. Press any key to start recording.

    On my PC:
    * If you want to display the stream, the application will run at 4 fps.
    * If you want to write the color stream to a video file while recording, it
    will run at 10 ~ 30 fps.
    * If you want to write all color frames to the video file together after
    recording, it can run at 30 fps - perfect. However, it will eat 14 GB of RAM
    per minute. You need to monitor your RAM usage and stop recording before
    your RAM is used up.

    Once recording starts, the screen is not updated, to keep the recording
    speed up. To stop recording, just close the window. It will take some time
    to save the data.

    Parameters
    ----------
    save_prefix: Path to save the recorded files. Color stream will be saved to
    `save_prefix`_color.avi, depth stream will be saved to
    `save_prefix`_depth.pkl as a list of ndarrays, body index will be saved to
    `save_prefix`_body.pkl also as a list of ndarrays.

    save_on_record: Whether to save color stream to video file while recording.

    """
        self.kinect = PyKinectRuntime.PyKinectRuntime(
            PyKinectV2.FrameSourceTypes_Depth
            | PyKinectV2.FrameSourceTypes_Color
            | PyKinectV2.FrameSourceTypes_BodyIndex)
        self.depth_height = self.kinect.depth_frame_desc.Height
        self.depth_width = self.kinect.depth_frame_desc.Width
        self.color_height = self.kinect.color_frame_desc.Height
        self.color_width = self.kinect.color_frame_desc.Width
        self.body = np.zeros([self.depth_height, self.depth_width])
        self.color = np.zeros([self.color_height, self.color_width, 3])
        self.depth = np.zeros([self.depth_height, self.depth_width])
        self.color_out = None
        self.color_frames = []
        self.depth_frames = []
        self.body_frames = []
        self.fps = 30
        self.recording = False
        self.save_prefix = save_prefix
        self.save_on_record = save_on_record
        self.min_fps = 30

        pygame.init()
        self.surface = pygame.Surface(
            (self.color_width + self.depth_width, self.color_height), 0, 24)
        self.hw_ratio = self.surface.get_height() / self.surface.get_width()

        # screen layout: # is color stream, * is depth, & is body index
        #  ----------------------
        # |################# *****|
        # |################# *****|
        # |################# *****|
        # |################# &&&&&|
        # |################# &&&&&|
        # |################# &&&&&|
        #  ----------------------
        scale = 0.6
        self.screen = pygame.display.set_mode(
            (int(self.surface.get_width() * scale),
             int(self.surface.get_height() * scale)),
            pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE, 24)
        self.done = False
        self.clock = pygame.time.Clock()
        pygame.display.set_caption('Kinect Human Recorder')

        self.frame = np.ones(
            [self.surface.get_height(),
             self.surface.get_width(), 3])
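
Reading such a recording back is not shown in this snippet; a minimal sketch of a hypothetical loader, assuming each .pkl file holds a single pickled list of ndarrays as the docstring above describes:

import pickle

def load_recording(save_prefix):
    # Hypothetical helper (not part of the original class): load the pickled
    # depth and body-index frame lists written to save_prefix_depth.pkl and
    # save_prefix_body.pkl.
    with open(save_prefix + '_depth.pkl', 'rb') as f:
        depth_frames = pickle.load(f)
    with open(save_prefix + '_body.pkl', 'rb') as f:
        body_frames = pickle.load(f)
    return depth_frames, body_frames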
Example #19
    def __init__(self, w, h):
        #pygame.init()
        self.c = 0
        self.x0 = []
        self.flag = False

        self.done = False
        self.over = False
        self.gameover = False
        self.gameover1 = False
        self.passed = False
        self.width = w
        self.height = h
        #get the game surface
        #self.gobangGameSurface = pygame.display.set_mode((self.width,self.height))
        self.gobangGameSurface = pygame.Surface((self.width, self.height))
        #get the chess board surface
        self.boardWidth = 450
        self.boardHeight = 450
        # the center of the board
        self.centerx = self.width / 2
        self.centery = self.height / 2
        self.gobang = pygame.Surface((self.boardWidth, self.boardHeight))
        self.rows = 15
        self.cols = 15
        self.me = True  # the player goes first
        self.chessBoard = [([0] * self.cols) for row in range(self.rows)
                           ]  #storing current grid info
        self.chessBoardColor = [([0] * self.cols) for row in range(self.rows)]
        self.count = 0
        self.win = []
        #self.win = [(([False]*3) for col in range(self.cols)) for row in range(self.rows)]

        #temp2d = []
        #for col in range(self.cols): temp2d += [[False]*572]
        #for row in range(self.rows): self.win += [temp2d]
        self.win = [[[False for k in range(572)]
                     for j in range(self.cols)]
                    for i in range(self.rows)]
        print(len(self.win), len(self.win[0]), len(self.win[0][0]))
        self.initializeWinCount()
        # initialize the number of chess dropped for each winning ways
        print(self.count)
        for i in range(15):
            for j in range(15):
                if self.win[i][j][0] == True:
                    print((i, j))
        self.myDrop = [0] * self.count
        self.pcDrop = [0] * self.count
        self.clock = pygame.time.Clock()
        self.kinect = PyKinectRuntime.PyKinectRuntime(
            PyKinectV2.FrameSourceTypes_Color
            | PyKinectV2.FrameSourceTypes_Body)
        self.cursorRight = Hand("right")
        self.bodies = None
        self.curRightHandY = self.curRightHandX = self.curLeftHandX = self.curLeftHandY = None

        self.gameOverPage = MyImage("icon/instructions/gameOver.gif",
                                    self.width / 2, self.height / 2)
        self.replayButton = Button("icon/buttons/replay.gif", self.width / 2,
                                   self.height * 3 / 4)
        self.gameOverPageButtons = [self.replayButton]
Example #20
    def __init__(self):
        pygame.init()

        # Set the width and height of the screen [width, height]
        self._infoObject = pygame.display.Info()
        self._screen = pygame.display.set_mode(
            (self._infoObject.current_w >> 1, self._infoObject.current_h >> 1),
            pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE, 32)

        pygame.display.set_caption("Kinect for Windows v2 Body Game")

        # Loop until the user clicks the close button.
        self._done = False

        self.now = 0

        # Used to manage how fast the screen updates
        self._clock = pygame.time.Clock()

        # Kinect runtime object, we want color, infrared, body and depth frames
        self._kinect = PyKinectRuntime.PyKinectRuntime(
            PyKinectV2.FrameSourceTypes_Color
            | PyKinectV2.FrameSourceTypes_Infrared
            | PyKinectV2.FrameSourceTypes_Body
            | PyKinectV2.FrameSourceTypes_Depth)

        # back buffer surface for getting Kinect color frames, 32bit color, width and height equal to the Kinect color frame size
        self._frame_surface = pygame.Surface(
            (self._kinect.color_frame_desc.Width,
             self._kinect.color_frame_desc.Height), 0, 32)

        self.frames = np.zeros((100, self._kinect.color_frame_desc.Height,
                                self._kinect.color_frame_desc.Width, 3),
                               dtype=np.uint8)

        # here we will store skeleton data
        self._bodies = None

        self._joints_with_time = None
        self.path = None
        self._key_press = None
        # self._cnt = 0
        self._frameno = 0
        self._video_frameno = None
        self._timestamps = None
        self._video_color = None
        self._video_depth = None
        self._video_infrared = None

        self._audio = None
        self.MOUSE_BUTTON_DOWN = 1
        self.MOUSE_BUTTON_UP = 0

        self.clicked = False
        self.prev_mouse_state = self.MOUSE_BUTTON_DOWN

        self.isRecording = False

        self.sound_thread = None
        self.ir_counter = 0
        self.depth_counter = 0
Example #21
def save_frames(FILE_NAME):
    #records and saves colour and depth frames from the Kinect

    print("Saving colour and depth frames")

    # define file names
    depthfilename = "DEPTH." + FILE_NAME + ".pickle"
    colourfilename = "COLOUR." + FILE_NAME + ".pickle"
    depthfile = open(depthfilename, 'wb')
    colourfile = open(colourfilename, 'wb')

    #initialise kinect recording, and some time variables for tracking the framerate of the recordings
    kinect = PyKinectRuntime.PyKinectRuntime(
        PyKinectV2.FrameSourceTypes_Color | PyKinectV2.FrameSourceTypes_Depth)
    starttime = time.time()
    oldtime = 0
    i = 0
    fpsmax = 0
    fpsmin = 100

    # display_type = "COLOUR"
    display_type = "DEPTH"

    # Actual recording loop, exit by pressing escape to close the pop-up window
    while True:

        if kinect.has_new_depth_frame() and kinect.has_new_color_frame():
            elapsedtime = time.time() - starttime
            if (elapsedtime > i / 10):

                #Only evaluate FPS for larger i, or else you get divide-by-zero errors
                if i > 10:
                    try:
                        fps = 1 / (elapsedtime - oldtime)
                        print(fps)
                        if fps > fpsmax:
                            fpsmax = fps
                        if fps < fpsmin:
                            fpsmin = fps

                    except ZeroDivisionError:
                        print("Divide by zero error")
                        pass

                oldtime = elapsedtime

                #read kinect colour and depth data (the two formats below differ; one is ctypes and one isn't)
                depthframe = kinect.get_last_depth_frame()  #data for display
                depthframeD = kinect._depth_frame_data
                colourframe = kinect.get_last_color_frame()
                colourframeD = kinect._color_frame_data

                #convert depth frame from ctypes to an array so that I can save it
                depthframesaveformat = np.copy(
                    np.ctypeslib.as_array(
                        depthframeD,
                        shape=(kinect._depth_frame_data_capacity.value, ))
                )  # TODO: figure out how to solve intermittent differences of up to 3 cm
                pickle.dump(depthframesaveformat, depthfile)

                #reformat the other depth frame format for it to be displayed on screen
                depthframe = depthframe.astype(np.uint8)
                depthframe = np.reshape(depthframe, (424, 512))
                depthframe = cv2.cvtColor(depthframe, cv2.COLOR_GRAY2RGB)

                #Reslice to remove every 4th colour value, which is superfluous
                colourframe = np.reshape(colourframe, (2073600, 4))
                colourframe = colourframe[:, 0:3]

                #extract then combine the RGB data
                colourframeR = colourframe[:, 0]
                colourframeR = np.reshape(colourframeR, (1080, 1920))
                colourframeG = colourframe[:, 1]
                colourframeG = np.reshape(colourframeG, (1080, 1920))
                colourframeB = colourframe[:, 2]
                colourframeB = np.reshape(colourframeB, (1080, 1920))
                framefullcolour = cv2.merge(
                    [colourframeR, colourframeG, colourframeB])
                pickle.dump(framefullcolour, colourfile)

                if display_type == "COLOUR":

                    #Show colour frames as they are recorded
                    cv2.imshow('Recording KINECT Video Stream',
                               framefullcolour)

                if display_type == "DEPTH":

                    #show depth frames as they are recorded
                    cv2.imshow('Recording KINECT Video Stream', depthframe)

                i = i + 1

        #end recording if the escape key (key 27) is pressed
        key = cv2.waitKey(1)
        if key == 27: break
    cv2.destroyAllWindows()
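
Since each frame above is written with its own pickle.dump() call, reading a recording back means loading records until EOF; a minimal sketch with a hypothetical helper name:

import pickle

def load_pickled_frames(filename):
    # Hypothetical reader for the files written by save_frames(): one pickle
    # record per frame, appended to the same file.
    frames = []
    with open(filename, 'rb') as f:
        while True:
            try:
                frames.append(pickle.load(f))
            except EOFError:
                break
    return frames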
Example #22
from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
import numpy as np
import cv2

kinectD = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth)
kinectC = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color)

while True:
    # --- Getting frames and drawing
    #if kinectD.has_new_depth_frame():
    if kinectC.has_new_color_frame():
        frameD = kinectD.get_last_depth_frame()
        frameC = kinectC.get_last_color_frame()
        frameD = frameD.astype(np.uint8)
        frameC = np.reshape(frameC, (1080, 1920, 4))
        frameD = np.reshape(frameD, (424, 512))
        outputC = cv2.resize(frameC, (0, 0), fx=0.5, fy=0.5)
        outputD = cv2.resize(frameD, (0, 0), fx=1.0, fy=1.0)
        cv2.imshow('KINECT Video StreamC', outputC)
        cv2.imshow('KINECT Video StreamD', outputD)
        frame = None

    key = cv2.waitKey(1)
    if key == 27: break
Example #23
    def __init__(self):
        pygame.init()

        self.screen_width = 1920
        self.screen_height = 1080

        self.prev_left_hand_height = 0
        self.cur_left_hand_height = 0
        self.left_hand_lift = 0
        self.left_hand_tutorial_weight = .14
        self.tutorial_up = 0

        self.prev_left_hand_height2 = 0
        self.cur_left_hand_height2 = 0
        self.left_hand_lift2 = 0
        self.tutorial_up2 = 0

        self.prev_left_hand_width = 0
        self.cur_left_hand_width = 0
        self.left_hand_swipe = 0
        self.left_hand_power_weight = .14

        self.prev_left_hand_width2 = 0
        self.cur_left_hand_width2 = 0
        self.left_hand_swipe2 = 0

        self.right_hand_pos = (0, 0)
        self.radius_to_track = 50
        self.is_wand_tracking = 0
        self.is_wand_tracking2 = 0
        self.wand_width = 30
        self.wand_width2 = 20

        self.wand_color = (255, 0, 0)
        self.wand_grip_color = (200, 200, 200)
        self.wand_wide = 90  #For drawing the wand
        self.wand_high = 90

        self.wand_pos = (
            self.screen_width // 2, self.screen_height * 3 / 5
        )  #Will be set to right hand pos if right hand closes on it
        self.wand_tip = (self.wand_pos[0] + self.wand_wide,
                         self.wand_pos[1] + self.wand_high)
        self.default_wand_pos = self.wand_pos
        self.default_wand_tip = self.wand_tip
        self.wand_scale = 1.2
        self.grip_proportion = .2
        self.grip_width_proportion = 1.2

        self.wand_color2 = (0, 0, 255)
        self.wand_grip_color2 = (200, 200, 200)
        self.wand_wide2 = 90
        self.wand_high2 = 90

        self.wand_pos2 = (self.screen_width // 4 * 3,
                          self.screen_height * 3 / 5)
        self.wand_tip2 = (self.wand_pos2[0] + self.wand_wide,
                          self.wand_pos2[1] + self.wand_high)
        self.default_wand_pos2 = self.wand_pos2
        self.default_wand_tip2 = self.wand_tip2
        self.wand_scale2 = 1.5
        self.grip_proportion2 = .2
        self.grip_width_proportion2 = 1.3

        self.health_color = (0, 0, 255)
        self.power_color = (255, 255, 255)
        self.bar_height = 30
        self.bar_dist_from_head = 60
        self.player_label_distance = 60
        self.player_label_radius = 20

        self.body_list = [-1, -1]

        self.max_health = 100
        self.health = self.max_health

        self.max_health2 = 100
        self.health2 = self.max_health2

        self.max_power = 50
        self.power = self.max_power - 30
        self.power_increase = 5

        self.max_power2 = 50
        self.power2 = self.max_power2 - 30
        self.power_increase2 = 5

        self.trace = []  #Used to store player 1's trace of the circles
        self.trace2 = []  #Same for player 2

        self.max_spell_length = 4
        self.max_spell_length2 = 4

        #To draw the spellcasting circles
        self.circle_separation_radius = 200
        self.circle_radius = 50
        self.spell_circle_color = self.wand_color
        self.circles = []

        self.circle_separation_radius2 = 200
        self.circle_radius2 = 50
        self.spell_circle_color2 = self.wand_color2
        self.circles2 = []

        # Defining spells
        (self.north, self.south, self.east, self.west, self.clear) = (0, 1, 2,
                                                                      3, 4)

        #They are easier to reference as variable names
        self.expelliarmus = "Expelliarmus"
        self.expelliarmus_power = 20
        self.stupefy = "Stupefy"
        self.stupefy_power = 30
        self.stupefy_damage = 30
        self.protego = "Protego"
        self.protego_power = 50

        self.spell_book = dict()
        self.spell_book = {
            (self.south, self.north): self.expelliarmus,
            (self.east, self.west): self.stupefy,
            (self.west, self.north, self.east): self.protego
        }
        """
        E will make the wand reset back to the original spot and reset the opponent's spell trace
            Uses 20 power
        Stupefy takes off 30 health
            Uses 30 power
        Protego protects against next spell cast, but stops caster from casting another spell
            Uses 50 power
        Swiping right with the left hand to restore power

        """
        #Effects due to spells
        self.blocking = False
        self.blocking2 = False

        self.spell = ""
        self.spell2 = ""

        self.damage_modifier = 1
        self.damage_modifier2 = 1

        self.winner = -1
        self.start_screen = 1

        #Because the wand is still in the circle after casting the spell
        # and we don't want this to roll over to the next trace
        # But we still want the current circle to be an option
        self.spell_clear = 0
        self.spell_clear2 = 0

        # Used to manage how fast the screen updates
        self._clock = pygame.time.Clock()

        # Set the width and height of the window [width/2, height/2]
        # Change this to make it fullscreen or not
        self._screen = pygame.display.set_mode(
            (960, 540), pygame.HWSURFACE | pygame.DOUBLEBUF, 32)

        # Loop until the user clicks the close button.
        self._done = False

        # Kinect runtime object, we want color and body frames
        self._kinect = PyKinectRuntime.PyKinectRuntime(
            PyKinectV2.FrameSourceTypes_Color
            | PyKinectV2.FrameSourceTypes_Body)

        # back buffer surface for Kinect color frames, 32bit color, width and height equal to the Kinect color frame size
        self._frame_surface = pygame.Surface(
            (self._kinect.color_frame_desc.Width,
             self._kinect.color_frame_desc.Height), 0, 32)

        # here we will store skeleton data
        self._bodies = None
Example #24
# -*- coding: utf-8 -*-
import time
from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
from Kinetic import extractPoints
from numpy import *
#import pyttsx

k = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Body)
print "Kinect lance"
#e = pyttsx.init()

#e.say('Bonjour et bienvenu dans la prossaidure de calibration de la machine vivante. Une personne doit se mettre debout au centre de la saine, face public, les bras ecartai comme jaizu cri. et une autre personne est praite a tourner la Kinect selon l''axe Z. Tenez vous prai dans dix, neuf, huit, sept, six, cinq, quatre, trois, deux, un.')
#e.runAndWait()
calib = True
while calib:
    time.sleep(0.1)
    seeBody = False
    if k.has_new_body_frame():
        bs = k.get_last_body_frame()
        tiltrad = arctan(bs.floor_clip_plane.z / bs.floor_clip_plane.y)
        w = bs.floor_clip_plane.w
        #print tiltrad*180.0/pi,w
        if bs is not None:
            for b in bs.bodies:
                if not b.is_tracked:
                    continue
                # get joints positions
                js = b.joints
                kpos = extractPoints(js, tiltrad, w, 0.0, 0.0)
Example #25
from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime

import time
import numpy as np
import cv2

depth_image_size = (424,512)

kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth)

while(1):
    if kinect.has_new_depth_frame():
        depth_frame = kinect.get_last_depth_frame()
        depth_frame = depth_frame.reshape(depth_image_size)

        # map the depth frame to uint8 
        depth_frame = depth_frame * (256.0/np.amax(depth_frame))

        colorized_frame = cv2.applyColorMap(np.uint8(depth_frame), cv2.COLORMAP_JET)        
        cv2.imshow('depth',colorized_frame)
    
    if (cv2.waitKey(1) == 27):
        cv2.destroyAllWindows()
        kinect.close()
        break
Example #26
def detect_video(model):
    kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth)

    input_size = [int(model.net_info['height']), int(model.net_info['width'])]
    colors = pkl.load(open("yolov3-master\pallete", "rb"))
    classes = load_classes("yolov3-master\data\coco.names")
    colors = [colors[1]]

    # cap is the video captured by camera
    # for surface 0 is front 1 is back 2 is kinect
    cap = cv2.VideoCapture(1 + cv2.CAP_DSHOW)

    width, height = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(
        cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    fps = cap.get(cv2.CAP_PROP_FPS)
    #print(fps)

    read_frames = 0

    start_time = datetime.now()
    #print('Start Detect')
    #set times as 1
    #num=0
    #sample array
    #s=[[] for i in range(20)]
    #s=[]
    #!while loop!
    locationx = []
    locationy = []
    locationd = []
    while cap.isOpened():

        #print('Detecting')
        retflag, frame = cap.read()
        '''
        frame need to be corpped and then resize to 512*424
        '''

        cv2.circle(frame, (960, 540), 5, [0, 255, 255], thickness=-1)
        #2 feet-41
        #3 feet-41
        #4 feet-37
        #x=154
        y = 0
        h = 1080
        #h=1272
        #w=1611
        w = 1920 / 84.1 * 70.6
        w = int(w)
        x = (1920 - w) / 2 + 140
        x = int(x)
        dim = (512, 380)
        frame = frame[y:y + h, x:x + w]
        frame = cv2.resize(frame, dim)

        read_frames += 1
        if retflag:
            '''
            get depth frame
            '''
            if kinect.has_new_depth_frame():
                Dframe = kinect.get_last_depth_frame()
                frameD = kinect._depth_frame_data
                Dframe = Dframe.astype(np.uint8)
                #print(frame)

                Dframe = np.reshape(Dframe, (424, 512))
                dx = 0
                dy = 22
                dh = 380
                dw = 512
                dim = (512, 380)
                Dframe = Dframe[dy:dy + dh, dx:dx + dw]
                frame = cv2.resize(frame, dim)

                Dframe = cv2.cvtColor(Dframe, cv2.COLOR_GRAY2RGB)

                def click_event(event, x, y, flags, param):
                    if event == cv2.EVENT_RBUTTONDOWN:
                        print(x, y)
                    if event == cv2.EVENT_LBUTTONDOWN:
                        Pixel_Depth = frameD[(((22 + y) * 512) + x)]
                        print("x ", x, "y ", y, "Depth", Pixel_Depth)

                '''
                get RGB frame
                '''
            frame_tensor = cv_image2tensor(frame, input_size).unsqueeze(0)
            frame_tensor = Variable(frame_tensor)

            #if torch.cuda.is_available:
            frame_tensor = frame_tensor.cuda()
            detections = model(frame_tensor, True).cpu()
            #orange order
            #flag0=0
            flag1 = 0
            detections = process_result(detections, 0.5, 0.4)
            if len(detections) != 0:
                #3.3
                global flag
                print(flag)
                time.sleep()
                detections = transform_result(detections, [frame], input_size)
                num = len(detections)
                for detection in detections:
                    Label = int(detection[-1])
                    #flag=flag+1
                    if Label == 49:
                        flag1 = flag1 + 1
                        #detection=[]
                        center = draw_bbox([frame], detection, colors, classes)
                        #print(Label)
                        #print('cc is',center)
                        Dcenter = draw_bbox([Dframe], detection, colors,
                                            classes)
                        #import k
                        #k=0.105
                        #x,y,d=get_depth(center,kinect,k)
                        #redraw the boundary box
                        img = Dframe
                        nx, x, y, d = get_depth(center, kinect)
                        cv2.circle(img, (nx, y), 5, [0, 0, 255], thickness=-1)
                        #send to robot
                        x = center[0]
                        y = center[1]
                        x = x - 512 / 2 + 40
                        y = y - 318 / 2 - 35
                        y = -y
                        x = -x
                        #position
                        print("x ", x, "y ", y, "d ", d)
                        l = len(locationx)
                        #if True:
                        #pick up the orange
                        #for the nth orange(n==flag)
                        if flag1 == flag:
                            #approciate depth
                            if d > 500 and d < 1050:
                                locationx.append(x)
                                locationy.append(y)
                                locationd.append(d)
                                print(locationx)
                                print(l)
                                if l > 6:
                                    x1 = locationx[l - 6:l - 1]
                                    y1 = locationy[l - 6:l - 1]
                                    d1 = locationd[l - 6:l - 1]
                                    diff = np.var(x1) + np.var(y1) + np.var(d1)
                                    print(x1)
                                    print(y1)
                                    print(d1)
                                    print(diff)
                                    # less fluctuation
                                    if diff < 20:
                                        print("get position")
                                        x = np.average(x1)
                                        y = np.average(y1)
                                        d = np.average(d1)
                                        #scale x,y
                                        k1 = 1.48
                                        x = x * k1 * d / 512
                                        y = y * k1 * d / 318 * 0.72
                                        #x=x*k1
                                        #y=y*k1
                                        #actual position
                                        print(flag, x, y, d)
                                        #send to raspbaerry pi
                                        flag = ClientSocket(num, flag, x, y, d)
                                        #reset the data
                                        locationx = []
                                        locationy = []
                                        locationd = []

                        #choose stable on as result
                        #sample function()
                        #num=num+1

            cv2.imshow('RGBFrame', frame)
            cv2.imshow('DepthFrame', Dframe)

            #print("x: ", x ,"y: ", y)
            #print("_______________________")

            if read_frames % 30 == 0:
                print('Number of frames processed:', read_frames)

                #print('average FPS',float(read_frames/datetime.now()))
            #if flag0:
            #locationx=[]
            #locationy=[]
            #locationd=[]

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    end_time = datetime.now()
    print('Detection finished in %s' % (end_time - start_time))
    print('Total frames:', read_frames)
    cap.release()

    cv2.destroyAllWindows()

    return
Example #27
	def __init__(self):
		self.kinect=PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Body)
Example #28
from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime

import ctypes
import _ctypes
from _ctypes import COMError
import comtypes
import sys
import numpy
import time

import cv2

# Initialize Kinect sensor
kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color)

while (True):
    if kinect.has_new_color_frame():
        frame = kinect.get_last_color_frame()
        print(frame.shape)
        reshapedFrame = frame.reshape(1080, 1920, 4)
        cv2.imshow("frame", reshapedFrame)
        frame = None
        cv2.waitKey(20)
Example #29
    def __init__(self):

        # Kinect runtime object, we want only infrared and depth frames
        self._kinect = PyKinectRuntime.PyKinectRuntime(
            PyKinectV2.FrameSourceTypes_Infrared
            | PyKinectV2.FrameSourceTypes_Depth)
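
This example only creates the runtime; the other examples above poll such a runtime in a loop. A minimal polling sketch for the infrared stream (assumed usage, with the default 424x512 infrared frame size):

from pykinect2 import PyKinectV2, PyKinectRuntime
import numpy as np
import cv2

kinect = PyKinectRuntime.PyKinectRuntime(
    PyKinectV2.FrameSourceTypes_Infrared | PyKinectV2.FrameSourceTypes_Depth)
while True:
    if kinect.has_new_infrared_frame():
        ir = kinect.get_last_infrared_frame()  # flat uint16 array
        ir_image = (ir // 256).astype(np.uint8).reshape((424, 512))  # keep the top 8 bits
        cv2.imshow('KINECT Infrared Stream', ir_image)
    if cv2.waitKey(1) == 27:  # Esc to quit
        break
kinect.close()
cv2.destroyAllWindows()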
Example #30
    def __init__(self):
        pygame.init()
        pygame.mixer.init()

        self.beep_sound = pygame.mixer.Sound('audio\\beep.ogg')
        self.buzz_sound = pygame.mixer.Sound('audio\\buzz.ogg')
        self._screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN, 32)

        pygame.display.set_caption("Kinect Game Framework Test")

        self.finished = False
        self._clock = pygame.time.Clock()
        self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color |
                                                       PyKinectV2.FrameSourceTypes_Body)
        self._frame_surface = pygame.Surface((self._kinect.color_frame_desc.Width,
                                              self._kinect.color_frame_desc.Height), 0, 32)
        self._bodies = None

        self.score = 0

        self.vocab_dict = {"People drive ____ these days.":["quickly", "quick"],
                           "She has an ____ dog.":["active", "actively"],
                           "He ____ opens the mail.":["carefully", "careful"],
                           "The man ____ greets his friends.":["cheerfully", "cheerful"],
                           "That is a ____ sofa!":["comfortable", "comfortably"],
                           "The alarm sounds ____.":["continuously", "continuous"],
                           "That woman is ____!":["crazy", "crazily"],
                           "The woman speaks ____.":["delightfully", "delightful"],
                           "Juan is a very ____ carpenter.":["creative", "creatively"],
                           "Wow! That is a ____ storm!":["destructive", "destructively"],
                           "The racecar drove ____ by the school.":["powerfully", "powerful"],
                           "Juana ____ said NO!":["firmly", "firm"],
                           "He ____ opened the door.":["forcefully", "forceful"],
                           "It was a ____ day.":["glorious", "gloriously"],
                           "Maria ____ observed her ex-boyfriend.":["hatefully", "hateful"],
                           "He had a ___ idea.":["hopeful", "hopefully"],
                           "It was an ____ phrase.":["insulting", "insultingly"],
                           "Jenny ____ ate the last cookie.":["intentionally", "intentional"],
                           "He likes ____ music.":["irritating", "irritatingly"],
                           "Careful! That is a ___ dog!":["bad", "badly"],
                           "The man reacted ___ to the good news.":["speedily", "speedy"],
                           "Susana has always been a ____ girl.":["nice", "nicely"],
                           "The boys plunged into the ____ water.":["deep", "deeply"],
                           "The girl ____ saved her cat from the fire.":["bravely", "brave"],
                           "The man ____ drank too much alcohol.":["foolishly", "foolish"],
                           "Mario is ____ and never does his homework.":["lazy", "lazily"],
                           "The teacher is very ____.":["rude", "rudely"],
                           "The girl plays soccer ____.":["perfectly", "perfect"],
                           "It was an ____ crash.":["accidental", "accidentally"],
                           "That is an ____ turtle!.":["angry", "angrily"],
                           "She ____ ate her beans.":["happily", "happy"],
                           "John spoke ____.":["seriously", "serious"],
                           "Firulais is a ____ dog.":["loyal", "loyally"],
                           "Margie yelled ____ into the night.":["blindly", "blind"],
                           "He ran ____ toward me.":["wildly", "wild"],
                           "Pedro is ____!":["innocent", "innocently"],
                           "The gross man winked at her ____.":["sexually", "sexual"],
                           "Concepcion is a ____ girlfriend.":["jealous", "jealously"],
                           "Luis ____ goes to the bar.":["frequently", "frequent"],
                           "We didn't go out because it was raining ____.":["heavily", "heavy"],
                           "Our team lost the game because we played ____.":["badly", "bad"],
                           "We waited ____.":["patiently", "patient"],
                           "Jimmy arrived ____.":["unexpectedly", "unexpected"],
                           "Mike stays fit by playing tennis ____.":["regularly", "regular"],
                           "The driver of the car was ____ injured.":["seriously", "serious"],
                           "The driver of the car had ____ injuries.":["serious", "seriously"],
                           "Ismael looked ____ at Eleazar.":["hungrily", "hungry"],
                           "She is a ____ driver.":["dangerous", "dangerously"]}

        self._frame_surface.fill((255, 255, 255))