    def __init__(self):
        # Retrieve all settings
        self.settings = Settings()

        # Get the context and initialise it
        self.context = Context()
        self.context.init()

        # Create the depth generator to get the depth map of the scene
        self.depth = DepthGenerator()
        self.depth.create(self.context)
        self.depth.set_resolution_preset(RES_VGA)
        self.depth.fps = 30

        # Create the user generator to detect skeletons
        self.user = UserGenerator()
        self.user.create(self.context)

        # Initialise the skeleton tracking
        skeleton.init(self.user)

        # Start generating
        self.context.start_generating_all()

        # Create a new dataset item
        self.data = LiveDataset()
        self.data.hand = self.settings.BOTH_HAND

        # Schedule the first frame update
        Timer(0.001, self.updateImage, ()).start()
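The Timer on the last line is a one-shot threading.Timer, so updateImage (not shown in this example) presumably re-arms it after handling each frame. Below is a minimal, hypothetical sketch of that re-arming pattern; the frame counter and the stop condition are stand-ins for the actual depth processing.

from threading import Timer

class FrameLoop(object):
    """Hypothetical stand-in illustrating the re-arming one-shot Timer pattern."""

    def __init__(self):
        self.frames = 0
        # Schedule the first update, exactly as the example above does
        Timer(0.001, self.updateImage, ()).start()

    def updateImage(self):
        # ... grab a depth frame and run the recognition here ...
        self.frames += 1

        # Re-arm the one-shot timer so updates keep coming
        if self.frames < 100:  # stop condition for the sketch only
            Timer(0.001, self.updateImage, ()).start()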
Example #4
    def test_init(self):
        path = os.path.join(os.getcwd(), '.skeleton.json')

        self.assertFalse(os.path.exists(path))

        skeleton.init()

        self.assertTrue(os.path.exists(path))

        os.unlink(path)
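Note that skeleton.init() is called here with no arguments, unlike skeleton.init(self.user) in the GUI examples; the test only asserts that the call drops a .skeleton.json file into the current working directory. A hypothetical init() that would satisfy this particular test, assuming an empty JSON object is an acceptable default:

import json
import os

def init(path=None):
    """Create a default .skeleton.json settings file if it does not exist yet."""
    if path is None:
        path = os.path.join(os.getcwd(), '.skeleton.json')
    if not os.path.exists(path):
        with open(path, 'w') as fh:
            json.dump({}, fh)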
    def __init__(self):
        super(LiveGui, self).__init__()
        self.setWindowTitle("Pointing Gesture Recognition - Live")

        # Retrieve all settings
        self.settings = Settings()

        # Get the context and initialise it
        self.context = Context()
        self.context.init()

        # Create the depth generator to get the depth map of the scene
        self.depth = DepthGenerator()
        self.depth.create(self.context)
        self.depth.set_resolution_preset(RES_VGA)
        self.depth.fps = 30

        # Create the user generator to detect skeletons
        self.user = UserGenerator()
        self.user.create(self.context)

        # Initialise the skeleton tracking
        skeleton.init(self.user)

        # Start generating
        self.context.start_generating_all()
        print "Starting to detect users.."

        # Create a new dataset item
        self.data = LiveDataset()

        # Create the global layout
        self.layout = QtWidgets.QVBoxLayout(self)

        # Create a custom widget to hold the sensor's image
        self.depthImage = SensorWidget()
        self.depthImage.setGeometry(10, 10, 640, 480)

        # Add this custom widget to the global layout
        self.layout.addWidget(self.depthImage)

        # Set the default result text
        self.resultLabel = QtWidgets.QLabel()
        self.resultLabel.setText("No")

        # Create the acquisition form elements
        self.createAcquisitionForm()

        # Create and launch a timer to update the images
        self.timerScreen = QtCore.QTimer()
        self.timerScreen.setInterval(30)
        self.timerScreen.setSingleShot(True)
        self.timerScreen.timeout.connect(self.updateImage)
        self.timerScreen.start()
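Like any QWidget subclass, LiveGui needs a QApplication and a running event loop before its timers fire and its window appears. A typical entry point would look like the sketch below; the PyQt5 import is an assumption, and any Qt 5 binding that exposes QtWidgets works the same way.

import sys
from PyQt5 import QtWidgets

if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    gui = LiveGui()  # the class whose __init__ is shown above
    gui.show()
    sys.exit(app.exec_())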
Example #6
    def __init__(self):
        super(DatasetGui, self).__init__()
        self.setWindowTitle("Pointing Gesture Recognition - Dataset recording")

        # Retrieve all settings
        self.settings = Settings()

        # Load sounds
        self.countdownSound = QtMultimedia.QSound(
            self.settings.getResourceFolder() + "countdown.wav")
        self.countdownEndedSound = QtMultimedia.QSound(
            self.settings.getResourceFolder() + "countdown-ended.wav")

        # Get the context and initialise it
        self.context = Context()
        self.context.init()

        # Create the depth generator to get the depth map of the scene
        self.depth = DepthGenerator()
        self.depth.create(self.context)
        self.depth.set_resolution_preset(RES_VGA)
        self.depth.fps = 30

        # Create the image generator to get an RGB image of the scene
        self.image = ImageGenerator()
        self.image.create(self.context)
        self.image.set_resolution_preset(RES_VGA)
        self.image.fps = 30

        # Create the user generator to detect skeletons
        self.user = UserGenerator()
        self.user.create(self.context)

        # Initialise the skeleton tracking
        skeleton.init(self.user)

        # Start generating
        self.context.start_generating_all()
        print "Starting to detect users.."

        # Create a new dataset item
        self.data = Dataset()

        # Create a timer for an optional countdown before recording the data
        self.countdownTimer = QtCore.QTimer()
        self.countdownRemaining = 10
        self.countdownTimer.setInterval(1000)
        self.countdownTimer.setSingleShot(True)
        self.countdownTimer.timeout.connect(self.recordCountdown)

        # Create a timer to optionally record data for a heat map
        self.heatmapRunning = False
        self.heatmapTimer = QtCore.QTimer()
        self.heatmapTimer.setInterval(10)
        self.heatmapTimer.setSingleShot(True)
        self.heatmapTimer.timeout.connect(self.recordHeatmap)

        # Create the global layout
        self.layout = QtWidgets.QVBoxLayout(self)

        # Create a custom widget to hold the sensor's image
        self.depthImage = SensorWidget()
        self.depthImage.setGeometry(10, 10, 640, 480)

        # Add this custom widget to the global layout
        self.layout.addWidget(self.depthImage)

        # Hold the label indicating the number of datasets taken
        self.numberLabel = QtWidgets.QLabel()
        self.updateDatasetNumberLabel()

        # Create the acquisition form elements
        self.createAcquisitionForm()

        # Register a dialog window to prompt for the target position
        self.dialogWindow = DatasetDialog(self)

        # Allow saving the data when the right distance is reached
        self.recordIfReady = False

        # Create and launch a timer to update the images
        self.timerScreen = QtCore.QTimer()
        self.timerScreen.setInterval(30)
        self.timerScreen.setSingleShot(True)
        self.timerScreen.timeout.connect(self.updateImage)
        self.timerScreen.start()
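Both GUIs refresh through a single-shot QTimer, so updateImage has to restart the timer itself once a frame has been handled; otherwise the screen would be updated only once. A hypothetical widget illustrating that re-arm pattern (launch it with the same QApplication boilerplate as the LiveGui sketch above):

from PyQt5 import QtCore, QtWidgets

class RefreshDemo(QtWidgets.QLabel):
    """Hypothetical stand-in showing the single-shot QTimer re-arm pattern."""

    def __init__(self):
        super(RefreshDemo, self).__init__("waiting for frames...")
        self.frames = 0
        self.timerScreen = QtCore.QTimer()
        self.timerScreen.setInterval(30)
        self.timerScreen.setSingleShot(True)
        self.timerScreen.timeout.connect(self.updateImage)
        self.timerScreen.start()

    def updateImage(self):
        # ... grab and display the next depth frame here ...
        self.frames += 1
        self.setText("frames shown: %d" % self.frames)

        # The timer is single-shot, so restart it to schedule the next refresh
        self.timerScreen.start()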