def startKinectV2Dev(self):
    """Open the first Kinect v2 device via pylibfreenect2 and start RGB streaming.

    Side effects: sets self.fn, self.device and self.listener; exits the
    process (sys.exit(1)) when no device is attached.
    """
    from pylibfreenect2 import Freenect2, SyncMultiFrameListener, FrameType
    # Prefer hardware-accelerated packet pipelines; fall back to CPU decode.
    # FIX: narrowed the original bare `except:` to `except Exception:` so
    # KeyboardInterrupt/SystemExit are no longer swallowed, while still
    # falling back when a pipeline constructor fails at runtime.
    try:
        from pylibfreenect2 import OpenGLPacketPipeline
        pipeline = OpenGLPacketPipeline()
    except Exception:
        try:
            from pylibfreenect2 import OpenCLPacketPipeline
            pipeline = OpenCLPacketPipeline()
        except Exception:
            from pylibfreenect2 import CpuPacketPipeline
            pipeline = CpuPacketPipeline()
    print("LIBFREENECT2::Packet pipeline:", type(pipeline).__name__)

    self.fn = Freenect2()
    num_devices = self.fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = self.fn.getDeviceSerialNumber(0)
    self.device = self.fn.openDevice(serial, pipeline=pipeline)

    self.listener = SyncMultiFrameListener(
        FrameType.Color)  # listening only to rgb frames

    # Register listeners
    self.device.setColorFrameListener(self.listener)
    self.device.start()
def __init__(self, device_id=0):
    """Create a capture object bound to the first connected Kinect v2.

    NOTE(review): despite the original docstring's mention of OpenCV webcam
    IDs, `device_id` is never used -- device index 0 is always opened.
    `pipeline` is not defined in this method; presumably a module-level
    packet pipeline -- confirm at file top.  The device is not start()ed
    here -- presumably `_grab_frames` does that; verify.

    Raises:
        LookupError: when no Kinect device is connected.
    """
    # Silence libfreenect2 logging except for errors.
    logger = createConsoleLogger(LoggerLevel.Error)
    setGlobalLogger(logger)
    self.fn = Freenect2()
    num_devices = self.fn.enumerateDevices()
    if num_devices == 0:
        print("No Kinect!")
        raise LookupError()
    serial = self.fn.getDeviceSerialNumber(0)
    self.device = self.fn.openDevice(serial, pipeline=pipeline)
    # Listen to RGB frames only.
    types = 0
    types |= FrameType.Color
    self.listener = SyncMultiFrameListener(types)
    self.device.setColorFrameListener(self.listener)
    # Start a thread to continuously capture frames.
    # This must be done because different layers of buffering in the webcam
    # and OS drivers will cause you to retrieve old frames if they aren't
    # continuously read.
    self._capture_frame = None
    # Use a lock to prevent concurrent access to the camera.
    self._capture_lock = threading.Lock()
    self._capture_thread = threading.Thread(target=self._grab_frames)
    self._capture_thread.daemon = True
    self._capture_thread.start()
def __init__(self, kinect_num=0):
    """Open Kinect device number `kinect_num` and attach a Color|Depth listener.

    NOTE(review): `pipeline` is not defined in this method; presumably a
    module-level packet pipeline -- confirm at file top.  The device is not
    start()ed in this constructor.

    Raises:
        ConnectionError: when fewer than `kinect_num + 1` devices are attached.
    """
    self.fn = Freenect2()
    self.serial = None
    self.device = None
    self.listener = None
    self.registration = None
    self._frames = None  # frames cache so that the user can use them before we free them
    # Pre-allocated libfreenect2 frame buffers, reused across captures.
    # 1920x1082 matches libfreenect2's bigdepth output size -- TODO confirm.
    self._bigdepth = Frame(1920, 1082, 4)  # malloc'd
    self._undistorted = Frame(512, 424, 4)
    self._registered = Frame(512, 424, 4)
    self._color_dump = Frame(1920, 1080, 4)

    num_devices = self.fn.enumerateDevices()
    if num_devices <= kinect_num:
        raise ConnectionError("No Kinect device at index %d" % kinect_num)

    self.serial = self.fn.getDeviceSerialNumber(kinect_num)
    self.device = self.fn.openDevice(self.serial, pipeline=pipeline)

    self.listener = SyncMultiFrameListener(FrameType.Color | FrameType.Depth)

    # Register listeners
    self.device.setColorFrameListener(self.listener)
    self.device.setIrAndDepthFrameListener(self.listener)
def __init__(self, frametypes=FrameType.Color | FrameType.Depth, registration=True):
    """Open Kinect device 0 and start the requested streams.

    Args:
        frametypes: bitmask of FrameType flags to listen for.
        registration: when True, build a depth->color Registration and the
            512x424 working frames it needs.

    NOTE(review): `fn` and `pipeline` are module-level objects defined
    outside this view -- confirm at file top.
    """
    # open the device and start the listener
    num_devices = fn.enumerateDevices()
    assert num_devices > 0, "Couldn't find any devices"
    serial = fn.getDeviceSerialNumber(0)
    self.device = fn.openDevice(serial, pipeline=pipeline)

    self.listener = SyncMultiFrameListener(frametypes)
    self.device.setColorFrameListener(self.listener)
    self.device.setIrAndDepthFrameListener(self.listener)
    self.device.start()

    # NOTE: must be called after device.start()
    if registration:
        ir = self.device.getIrCameraParams()
        color = self.device.getColorCameraParams()
        self.registration = Registration(ir, color)
        # create these here so we don't have to every time we call update()
        self.undistorted = Frame(512, 424, 4)
        self.registered = Frame(512, 424, 4)

    # which kinds of frames are we going to be reading?
    # will be true or false (more precisely: a truthy/falsy bitmask value)
    self.need_color = frametypes & FrameType.Color
    self.need_depth = frametypes & FrameType.Depth
    self.need_ir = frametypes & FrameType.Ir

    # initialize our frames
    self.frames = {}

    # ensure that we shut down smoothly
    atexit.register(self.close)
def __init__(self, device_no=0):
    """Open Kinect v2 device `device_no`, start all streams and set up registration.

    Exits the process (sys.exit(1)) when no device is connected.
    """
    # Packet pipeline: prefer GPU-accelerated implementations, fall back to CPU.
    # FIX: `except Exception` instead of the original bare `except:` so
    # Ctrl-C is not swallowed while still falling back when a pipeline
    # constructor fails at runtime.
    try:
        from pylibfreenect2 import OpenGLPacketPipeline
        self.pipeline = OpenGLPacketPipeline()
    except Exception:
        try:
            from pylibfreenect2 import OpenCLPacketPipeline
            self.pipeline = OpenCLPacketPipeline()
        except Exception:
            from pylibfreenect2 import CpuPacketPipeline
            self.pipeline = CpuPacketPipeline()
    # FIX: print() call form -- works on both Python 2 and Python 3 for a
    # single argument (the original used a Python-2-only print statement).
    print("Packet pipeline: {}".format(type(self.pipeline).__name__))

    # Create and set logger (errors only).
    logger = createConsoleLogger(LoggerLevel.Error)
    setGlobalLogger(logger)

    # Detect Kinect Devices
    self.fn = Freenect2()
    num_devices = self.fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        self.device = None
        sys.exit(1)

    # Create Device and Frame Listeners
    self.serial = self.fn.getDeviceSerialNumber(device_no)
    self.device = self.fn.openDevice(self.serial, pipeline=self.pipeline)
    self.listener = SyncMultiFrameListener(FrameType.Color |
                                           FrameType.Ir |
                                           FrameType.Depth)

    # Register Listeners
    self.device.setColorFrameListener(self.listener)
    self.device.setIrAndDepthFrameListener(self.listener)

    # Start Device
    self.device.start()

    # Set Camera Parameters (factory calibration read from the device).
    self._ir_params = self.device.getIrCameraParams()
    self._color_params = self.device.getColorCameraParams()
    # Optional override with external calibration values:
    # self._ir_params.fx = cameraParams.params['ir_fx']
    # self._ir_params.fy = cameraParams.params['ir_fy']
    # self._ir_params.cx = cameraParams.params['ir_cx']
    # self._ir_params.cy = cameraParams.params['ir_cy']
    # self._color_params.fx = cameraParams.params['col_fx']
    # self._color_params.fy = cameraParams.params['col_fy']
    # self._color_params.cx = cameraParams.params['col_cx']
    # self._color_params.cy = cameraParams.params['col_cy']

    # Registration (depth -> color mapping).
    self.registration = Registration(self._ir_params, self._color_params)

    print('[Info] Initialization Finished!')
def __init__(self):
    """Open the default Kinect v2 with an OpenCL pipeline and start all streams."""
    self.pipeline = OpenCLPacketPipeline()
    freenect = Freenect2()
    self.device = freenect.openDefaultDevice(pipeline=self.pipeline)
    # One synchronized listener serving all three streams.
    frame_types = FrameType.Color | FrameType.Ir | FrameType.Depth
    self.listener = SyncMultiFrameListener(frame_types)
    self.device.setColorFrameListener(self.listener)
    self.device.setIrAndDepthFrameListener(self.listener)
    self.device.start()
    # Camera parameters are only valid after start().
    ir_params = self.device.getIrCameraParams()
    color_params = self.device.getColorCameraParams()
    self.registration = Registration(ir_params, color_params)
    # Working buffers for registration.apply().
    self.undistorted = Frame(512, 424, 4)
    self.registered = Frame(512, 424, 4)
def run(self):
    """
    main function to start Kinect and read depth information from Kinect,
    show video and save it
    """
    Fn = Freenect2()
    num_devices = Fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)
    serial = Fn.getDeviceSerialNumber(0)
    # NOTE(review): `pipeline` is not defined in this method; presumably a
    # module-level packet pipeline -- confirm at file top.
    device = Fn.openDevice(serial, pipeline=pipeline)
    # Depth frames only.
    listener = SyncMultiFrameListener(FrameType.Depth)
    device.setIrAndDepthFrameListener(listener)
    device.start()
    # Record the displayed depth stream: MJPG, 30 fps, 512x424, grayscale.
    fourcc = cv2.VideoWriter_fourcc('m', 'j', 'p', 'g')
    depth_video = cv2.VideoWriter(
        'Depth.avi', fourcc, 30.0, (512, 424), 0)
    i = 0  # index for saved segmented-depth files
    while not self._done:
        frames = listener.waitForNewFrame()
        depth = frames["depth"]
        # Clamp raw depth to [0, 4096] before contour detection.
        depth = depth.asarray().clip(0, 4096)
        hand_contour = self._find_contour(depth)
        darks = np.zeros((424, 512), dtype=np.uint8)
        # Heuristic: a contour area outside [1000, 5000] px is not a hand.
        if cv2.contourArea(hand_contour[0]) < 1000 or cv2.contourArea(hand_contour[0]) > 5000:
            # Scale 0..4096 down to 0..255 for display/recording.
            self.cover = np.uint8(depth/16.)
        else:
            # NOTE(review): this branch saves the segmented hand but leaves
            # self.cover at its previous value -- confirm that is intended.
            seg_depth = self._segment(depth, hand_contour, darks)
            np.save('./offline/data/seg_depth%d.npy' % i, seg_depth)
            i += 1
        cv2.imshow("depth", self.cover)
        depth_video.write(self.cover)
        listener.release(frames)
        key = cv2.waitKey(delay=1)
        if key == ord('q'):
            self._done = True
    depth_video.release()
    device.stop()
    device.close()
    sys.exit()
class Kinect2:
    """Wrapper around the first connected Kinect v2 device.

    Streams color, IR and depth through one SyncMultiFrameListener and can
    optionally load a calibration from disk.
    """

    def __init__(self, calibration_folder=None):
        """Open device 0, start all streams, optionally load calibration.

        Args:
            calibration_folder: when given, passed to utils.load_calibration
                and the result stored in self.calibration.

        Raises:
            ValueError: when no Kinect v2 device is connected.

        NOTE(review): `pipeline` is not defined in this class; presumably a
        module-level packet pipeline -- confirm at file top.
        """
        self.freenect = Freenect2()
        # Silence libfreenect2 console output.
        setGlobalLogger(createConsoleLogger(LoggerLevel.NONE))
        num_devices = self.freenect.enumerateDevices()
        if num_devices == 0:
            raise ValueError("No kinect2 device connected!")
        serial = self.freenect.getDeviceSerialNumber(0)
        self.device = self.freenect.openDevice(serial, pipeline=pipeline)
        self.listener = SyncMultiFrameListener(FrameType.Color |
                                               FrameType.Ir |
                                               FrameType.Depth)
        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)
        self.device.start()
        self.calibration = {}
        if calibration_folder is not None:
            self.calibration = utils.load_calibration(calibration_folder)

    def snapshot(self):
        """Grab one frame set; return (color, depth, ir) as mirrored copies.

        color: uint8 with the alpha channel dropped; depth/ir: float32.
        """
        frames = self.listener.waitForNewFrame()
        color = frames["color"]
        depth = frames["depth"]
        ir = frames["ir"]
        # Copy out of the libfreenect2 buffers before releasing the frames.
        color = color.asarray(np.uint8).copy()
        depth = depth.asarray(np.float32).copy()
        ir = ir.asarray(np.float32).copy()
        # flip images
        color = color[:, ::-1]
        ir = ir[:, ::-1]
        depth = depth[:, ::-1]
        color = color[..., :3]
        # color = color[..., ::-1]
        ir = self._convert_ir(ir)
        self.listener.release(frames)
        return color, depth, ir

    def _convert_ir(self, ir):
        """Normalize a raw IR image to an 8-bit, 3-channel array.

        convertIr function
        https://github.com/code-iai/iai_kinect2/blob/master/kinect2_calibration/src/kinect2_calibration.cpp
        """
        ir_min, ir_max = ir.min(), ir.max()
        # FIX: reuse the already-computed min/max instead of re-running
        # ir.max()/ir.min() over the whole image a second time.
        ir_convert = (ir - ir_min) / (ir_max - ir_min)
        ir_convert *= 255
        # Replicate the single channel into three so it displays like color.
        ir_convert = np.tile(ir_convert[..., None], (1, 1, 3))
        return ir_convert.astype(np.uint8)
def getcameraimgbykinect(self, basedir):
    """Continuously capture RGB snapshots from Kinect v2, save them as JPEGs
    under basedir/<date>/, run fire detection on each and report the result.

    Loops forever with a 3-second pause between snapshots.
    NOTE(review): device.stop()/close() after the `while True` loop are
    unreachable.
    """
    # Prefer GPU packet pipelines; fall back to CPU.
    # NOTE(review): bare `except:` also catches KeyboardInterrupt.
    try:
        from pylibfreenect2 import OpenGLPacketPipeline
        pipeline = OpenGLPacketPipeline()
    except:
        try:
            from pylibfreenect2 import OpenCLPacketPipeline
            pipeline = OpenCLPacketPipeline()
        except:
            from pylibfreenect2 import CpuPacketPipeline
            pipeline = CpuPacketPipeline()
    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        return
    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)
    # RGB frames only.
    types = 0
    types |= FrameType.Color
    listener = SyncMultiFrameListener(types)
    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)
    device.startStreams(rgb=True, depth=False)
    while True:
        frames = listener.waitForNewFrame()
        color = frames["color"]
        # Downscale the snapshot before saving/detection.
        colorimg = cv2.resize(color.asarray(), (200,150))
        (shortDate, longDate)=self.getDateStr()
        # One directory per day.
        file_dir = basedir+"/"+shortDate
        if not os.path.exists(file_dir):
            os.makedirs(file_dir)
        new_filename = Pic_str().create_uuid() + '.jpg'
        fpath = os.path.join(file_dir, new_filename)
        # print(fpath )
        cv2.imwrite(fpath, colorimg, [cv2.IMWRITE_JPEG_QUALITY,70])
        listener.release(frames)
        # Fire detection on the saved snapshot; report result to the server.
        isfire = self.fireDetect.detect_image(fpath)
        self.sendUtils.sendReqtoServer("园区厂区仓库","网络摄像头",longDate,isfire,shortDate+"/"+new_filename)
        time.sleep(3)
    device.stop()
    device.close()
    return
def __init__(self):
    """Open Kinect v2 device 0 with the best available packet pipeline,
    start all streams, and prepare a rolling color-image averaging buffer.

    Exits the process (sys.exit(1)) when no device is connected; installs a
    SIGINT handler that flags the capture loop to stop.
    """
    # Prefer OpenCL, then OpenGL, then CPU decoding.
    # NOTE(review): bare `except:` also catches KeyboardInterrupt.
    try:
        from pylibfreenect2 import OpenCLPacketPipeline
        pipeline = OpenCLPacketPipeline()
    except:
        try:
            from pylibfreenect2 import OpenGLPacketPipeline
            pipeline = OpenGLPacketPipeline()
        except:
            from pylibfreenect2 import CpuPacketPipeline
            pipeline = CpuPacketPipeline()
    print("Packet pipeline:", type(pipeline).__name__)

    # Create and set logger
    logger = createConsoleLogger(LoggerLevel.Debug)
    setGlobalLogger(logger)

    self.freenect = Freenect2()
    num_devices = self.freenect.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = self.freenect.getDeviceSerialNumber(0)
    self.device = self.freenect.openDevice(serial, pipeline=pipeline)
    self.listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir | FrameType.Depth)

    # Register listeners
    self.device.setColorFrameListener(self.listener)
    self.device.setIrAndDepthFrameListener(self.listener)
    self.device.start()

    self.registration = Registration(self.device.getIrCameraParams(),
                                     self.device.getColorCameraParams())

    self.run_loop = True
    self.lock = threading.Lock()
    # Rolling buffer of the last SIZE_AVG_FILTER color frames (for averaging).
    self.img_buffer = np.zeros(
        (RES_COLOR[1], RES_COLOR[0], 3, SIZE_AVG_FILTER))
    self.idx_buffer = 0

    # Close on Ctrl-C
    def signal_handler(signal, frame):
        self.run_loop = False
    signal.signal(signal.SIGINT, signal_handler)
def connect(self):
    """Open the stored Kinect serial and start an RGB-only stream."""
    # Open the device with the configured packet pipeline.
    self.device = self.fn.openDevice(self.serial, pipeline=self.pipeline)
    # RGB listener only -- depth stays off.
    self.listener = SyncMultiFrameListener(FrameType.Color)
    self.device.setColorFrameListener(self.listener)
    # Begin streaming color frames.
    self.device.startStreams(rgb=True, depth=False)
    self.connected = True
def connect_kinect(self):
    """Find and start the first Kinect v2, building registration buffers.

    Sets self.connected to False and returns early when no device is found.
    """
    # Bad, but needed
    self.fn = Freenect2()
    num_devices = self.fn.enumerateDevices()
    if num_devices == 0:
        print("Kinect not found, check again")
        self.connected = False
        return
    # Prefer the OpenGL pipeline; fall back to CPU.
    # FIX: `except Exception` instead of the original bare `except:` so
    # KeyboardInterrupt/SystemExit propagate while runtime construction
    # failures still trigger the CPU fallback.
    try:
        self.pipeline = OpenGLPacketPipeline()
    except Exception:
        self.pipeline = CpuPacketPipeline()
    serial = self.fn.getDeviceSerialNumber(0)
    self.device = self.fn.openDevice(serial, pipeline=self.pipeline)
    self.listener = SyncMultiFrameListener(
        FrameType.Color | FrameType.Ir | FrameType.Depth)
    # Register listeners
    self.device.setColorFrameListener(self.listener)
    self.device.setIrAndDepthFrameListener(self.listener)
    self.device.start()
    print("started")
    # NOTE: must be called after device.start()
    self.registration = Registration(self.device.getIrCameraParams(),
                                     self.device.getColorCameraParams())
    print("registration")
    # Working buffers for registration.apply().
    self.undistorted = Frame(512, 424, 4)
    self.registered = Frame(512, 424, 4)
    self.connected = True
def open(self):
    """(Re)open the tracked Kinect device and start all streams.

    If the freenect device list changed since the last enumeration, the
    newly opened device is closed again and open() recurses with the
    refreshed index.
    """
    self._listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir | FrameType.Depth)
    # NOTE(review): `pipeline` is not defined in this method; presumably a
    # module-level packet pipeline -- confirm at file top.
    self._device = self._freenect.openDevice(self._serial, pipeline=pipeline)
    device_index = self.__device_list_index()
    if self._device_index != device_index:
        # keep track of changes in the device list
        self._device_index = device_index
        self._device.close()
        # NOTE(review): releases self.frames even though this call acquired
        # no new frames -- verify the attribute is always valid here.
        self._listener.release(self.frames)
        self.open()
        return
    self._device.setColorFrameListener(self._listener)
    self._device.setIrAndDepthFrameListener(self._listener)
    self._device.start()
    self._opened = True
    self._playing = False
def _init_device(self):
    """Initialise the Kinect backend for the current platform.

    Windows: PyKinect2 runtime with color+depth+IR sources.
    Linux:   pylibfreenect2 (when `_pylib`) -- opens device 0, grabs one
             frame set as a sanity check, then stops and closes it;
             otherwise (`_lib`) tries freenect2's Device().

    Raises:
        NotImplementedError: on any other platform.
    """
    if _platform == 'Windows':
        # NOTE(review): assigned to a local, so the runtime object is
        # discarded when this method returns -- confirm that is intended.
        device = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color |
                                                 PyKinectV2.FrameSourceTypes_Depth |
                                                 PyKinectV2.FrameSourceTypes_Infrared)
    elif _platform == 'Linux':
        if _pylib:
            # self.device = Device()
            fn = Freenect2()
            num_devices = fn.enumerateDevices()
            assert num_devices > 0
            serial = fn.getDeviceSerialNumber(0)
            # NOTE(review): `pipeline` is presumably a module-level packet
            # pipeline -- confirm at file top.
            self.device = fn.openDevice(serial, pipeline=pipeline)
            self.listener = SyncMultiFrameListener(
                FrameType.Color | FrameType.Ir | FrameType.Depth)
            # Register listeners
            self.device.setColorFrameListener(self.listener)
            self.device.setIrAndDepthFrameListener(self.listener)
            self.device.start()
            self.registration = Registration(self.device.getIrCameraParams(),
                                             self.device.getColorCameraParams())
            self.undistorted = Frame(512, 424, 4)
            self.registered = Frame(512, 424, 4)
            # Sanity check: pull one frame set, then shut the device down.
            frames = self.listener.waitForNewFrame()
            self.listener.release(frames)
            self.device.stop()
            self.device.close()
        elif _lib:
            try:
                self.device = Device()
            except:
                # NOTE(review): bare except -- also catches KeyboardInterrupt.
                traceback.print_exc()
    else:
        print(_platform)
        raise NotImplementedError
def __init__(self, calibration_folder=None):
    """Open Kinect v2 device 0, start all streams, optionally load calibration.

    Args:
        calibration_folder: when given, passed to utils.load_calibration and
            the result stored in self.calibration (otherwise an empty dict).

    Raises:
        ValueError: when no Kinect v2 device is connected.

    NOTE(review): `pipeline` is not defined in this method; presumably a
    module-level packet pipeline -- confirm at file top.
    """
    self.freenect = Freenect2()
    # Disable libfreenect2 console output.
    setGlobalLogger(createConsoleLogger(LoggerLevel.NONE))
    num_devices = self.freenect.enumerateDevices()
    if num_devices == 0:
        raise ValueError("No kinect2 device connected!")
    serial = self.freenect.getDeviceSerialNumber(0)
    self.device = self.freenect.openDevice(serial, pipeline=pipeline)
    self.listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir | FrameType.Depth)
    self.device.setColorFrameListener(self.listener)
    self.device.setIrAndDepthFrameListener(self.listener)
    self.device.start()
    self.calibration = {}
    if calibration_folder is not None:
        self.calibration = utils.load_calibration(calibration_folder)
def service_func():
    """Periodically grab IR frames from the first Kinect v2 and save kinect.png.

    Runs forever: every cycle it captures four IR frames aligned to 30-second
    wall-clock boundaries, writing each next to this script, until stopped
    with Ctrl-C.  Exits the process when no device is attached.
    """
    # Prefer the OpenGL pipeline; fall back to CPU.
    # FIX: `except Exception` instead of the original bare `except:` so
    # KeyboardInterrupt is not swallowed during startup.
    try:
        from pylibfreenect2 import OpenGLPacketPipeline
        pipeline = OpenGLPacketPipeline()
    except Exception:
        from pylibfreenect2 import CpuPacketPipeline
        pipeline = CpuPacketPipeline()

    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)
    listener = SyncMultiFrameListener(FrameType.Ir)
    device.setIrAndDepthFrameListener(listener)

    directory = os.path.dirname(os.path.realpath('__file__'))
    try:
        while 1:
            device.start()
            for i in range(1, 5):
                frames = listener.waitForNewFrame()
                ir = frames["ir"]
                # Scale raw IR values down to a 0..255-ish range.
                tmpimg = ir.asarray() / 256.
                tmp = tmpimg.astype(int)
                path = directory + "/kinect.png"
                # FIX: print() call form (original used Python-2-only syntax).
                print(path)
                cv2.imwrite(path, tmp)
                listener.release(frames)
                if i < 4:
                    # Sleep until the next 30-second wall-clock boundary.
                    time.sleep(30 - time.localtime().tm_sec % 30)
            device.stop()
            time.sleep(5)
            time.sleep((90 - time.localtime().tm_sec) % 30)
    except KeyboardInterrupt:
        device.close()
        # FIX: the original ended with a bare `exit` expression, which is a
        # no-op (the function was never called); terminate explicitly.
        sys.exit(0)
class Kinect:
    """RGB-only wrapper around the first connected Kinect v2."""

    def __init__(self):
        # Exits the process (sys.exit(1)) when no device is attached.
        self.fn = Freenect2()
        num_devices = self.fn.enumerateDevices()
        if num_devices == 0:
            print("No device connected!")
            sys.exit(1)
        serial = self.fn.getDeviceSerialNumber(0)
        # NOTE(review): `pipeline` is not defined in this class; presumably a
        # module-level packet pipeline -- confirm at file top.
        self.device = self.fn.openDevice(serial, pipeline=pipeline)
        self.listener = SyncMultiFrameListener(
            FrameType.Color)
        # Register listeners
        self.device.setColorFrameListener(self.listener)
        # device.setIrAndDepthFrameListener(listener)
        self.device.start()
        # NOTE: must be called after device.start()
        # NOTE(review): bound to a local, so the Registration object is
        # discarded when __init__ returns -- confirm that is intended.
        registration = Registration(self.device.getIrCameraParams(),
                                    self.device.getColorCameraParams())

    def __del__(self):
        # Best-effort shutdown when the wrapper is garbage-collected.
        self.device.stop()
        self.device.close()

    def get_image(self):
        """Block for the next color frame; return it horizontally flipped
        with the fourth channel dropped.

        NOTE(review): the slice is a view into the frame buffer, which is
        released below -- consider copying before release.
        """
        frames = self.listener.waitForNewFrame()
        color = frames["color"].asarray()[:,::-1,:3]
        self.listener.release(frames)
        # if np.sum(color[-1,:,:]) == 0:
        #     return None
        return color
def __test(enable_rgb, enable_depth):
    """Smoke test: open the default device, stream the requested frame
    types, grab a single frame set, then shut everything down."""
    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    assert num_devices > 0
    device = fn.openDefaultDevice()

    # Build the listener's frame-type mask from the requested streams.
    wanted = 0
    if enable_rgb:
        wanted |= FrameType.Color
    if enable_depth:
        wanted |= (FrameType.Ir | FrameType.Depth)
    listener = SyncMultiFrameListener(wanted)

    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)
    device.startStreams(rgb=enable_rgb, depth=enable_depth)

    # Verify at least one frame set can be acquired.
    frames = listener.waitForNewFrame()
    listener.release(frames)

    device.stop()
    device.close()
def initCamera(fn, pipeline):
    """Open the first Kinect v2 on `fn` with depth-only streaming.

    Returns a (device, listener, registration) tuple.  Exits the process
    when no device is connected.
    """
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)

    # IR + depth listener; the RGB stream stays off.
    listener = SyncMultiFrameListener(FrameType.Ir | FrameType.Depth)
    device.setIrAndDepthFrameListener(listener)
    device.startStreams(rgb=False, depth=True)

    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())
    return (device, listener, registration)
def __init__(self):
    """Open the first Kinect v2 and attach a Color+IR+Depth listener.

    Exits the process with status 47 when no device is attached.
    NOTE(review): the device is opened without a packet pipeline, the logger
    is created but never installed via setGlobalLogger, and device.start()
    is never called here -- presumably a caller starts it.
    """
    self.averageSpineX = 0
    self.fn = Freenect2()
    if self.fn.enumerateDevices() == 0:
        sys.exit(47)
    self.serial = self.fn.getDeviceSerialNumber(0)
    # Listen to all three streams.
    types = 0
    types |= FrameType.Color
    types |= (FrameType.Ir | FrameType.Depth)
    self.listener = SyncMultiFrameListener(types)
    self.logger = createConsoleLogger(LoggerLevel.Debug)
    self.device = self.fn.openDevice(self.serial)
    self.device.setColorFrameListener(self.listener)
    self.device.setIrAndDepthFrameListener(self.listener)
    # Working buffers for registration.
    self.undistorted = Frame(512, 424, 4)
    self.registered = Frame(512, 424, 4)
def setup(self, fn, pipeline, **kwargs):
    """Initialise this Kivy camera widget against Kinect device 0.

    Args:
        fn: a Freenect2 instance used to enumerate/open the device.
        pipeline: libfreenect2 packet pipeline to decode with.
        **kwargs: forwarded to the KivyCamera base initializer.
    """
    super(KivyCamera, self).__init__(**kwargs)
    self.bg_image = None
    self.current_img = None
    self.current_final_img = None
    self.current_mask = None
    serial = fn.getDeviceSerialNumber(0)
    self.device = fn.openDevice(serial, pipeline=pipeline)
    self.listener = SyncMultiFrameListener(
        FrameType.Color | FrameType.Ir | FrameType.Depth)
    # Register listeners
    self.device.setColorFrameListener(self.listener)
    self.device.setIrAndDepthFrameListener(self.listener)
    self.device.start()
    # NOTE: must be called after device.start()
    self.registration = Registration(self.device.getIrCameraParams(),
                                     self.device.getColorCameraParams())
    self.undistorted = Frame(512, 424, 4)
    self.registered = Frame(512, 424, 4)
    # Optional parameters for registration -- set True if you need them.
    need_bigdepth = True
    need_color_depth_map = False
    # 1920x1082 matches libfreenect2's bigdepth output size -- TODO confirm.
    self.bigdepth = Frame(1920, 1082, 4) if need_bigdepth else None
    self.color_depth_map = np.zeros((424, 512), np.int32).ravel() \
        if need_color_depth_map else None
    # Depth cutoff used downstream -- presumably meters; verify usage.
    self.clipping_distance = 0.5
def start_kinect(self):
    """ start the Kinect and config it with only depth information output """
    Fn = Freenect2()
    num_devices = Fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)
    serial = Fn.getDeviceSerialNumber(0)
    # NOTE(review): `pipeline` is presumably a module-level packet pipeline.
    self.device = Fn.openDevice(serial, pipeline=pipeline)
    # select the depth data only
    self.listener = SyncMultiFrameListener(FrameType.Depth)
    self.device.setIrAndDepthFrameListener(self.listener)
    self.device.start()
    # FIX: the original read `device.getIrCameraParams()`, which raised
    # NameError -- the device is stored on self; there is no local `device`.
    IRP = self.device.getIrCameraParams()
    # FIX: keep the focal length on the instance; the original computed it
    # into a local that was immediately discarded.
    self.focal = IRP.fx
def __init__(self):
    """Open the first Kinect v2 for RGB streaming.

    Exits the process (sys.exit(1)) when no device is connected.
    NOTE(review): `pipeline` is not defined in this method; presumably a
    module-level packet pipeline.  The Registration object built at the end
    is bound to a local and therefore discarded -- confirm that is intended.
    """
    self.fn = Freenect2()
    num_devices = self.fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)
    serial = self.fn.getDeviceSerialNumber(0)
    self.device = self.fn.openDevice(serial, pipeline=pipeline)
    self.listener = SyncMultiFrameListener(
        FrameType.Color)
    # Register listeners
    self.device.setColorFrameListener(self.listener)
    # device.setIrAndDepthFrameListener(listener)
    self.device.start()
    # NOTE: must be called after device.start()
    registration = Registration(self.device.getIrCameraParams(),
                                self.device.getColorCameraParams())
class KinectCamera(Camera):
    """Camera implementation backed by the default Kinect v2 device."""

    def __init__(self):
        """Open the default device with an OpenCL pipeline and start streams."""
        self.pipeline = OpenCLPacketPipeline()
        fn = Freenect2()
        self.device = fn.openDefaultDevice(pipeline=self.pipeline)
        self.listener = SyncMultiFrameListener(FrameType.Color |
                                               FrameType.Ir |
                                               FrameType.Depth)
        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)
        self.device.start()
        self.registration = Registration(self.device.getIrCameraParams(),
                                         self.device.getColorCameraParams())
        # Working buffers for registration.apply().
        self.undistorted = Frame(512, 424, 4)
        self.registered = Frame(512, 424, 4)

    def getFrames(self):
        """Wait for the next frame set, register depth onto color, and
        return the frames dict with a 'registered' entry added."""
        frames = self.listener.waitForNewFrame()
        self.registration.apply(frames['color'], frames['depth'],
                                self.undistorted, self.registered)
        # FIX: the original stored `self.registration` (the Registration
        # helper object) under 'registered'; callers expect the registered
        # Frame that apply() just filled.
        frames['registered'] = self.registered
        return frames

    def stop(self):
        """Stop streaming and close the device."""
        self.device.stop()
        self.device.close()
class Kinect:
    """Kinect v2 wrapper for a drowning-detection loop: tracks the average
    horizontal spine position of a person in the depth stream and raises an
    alarm when it stops changing.

    Written for Python 2 (print statements).
    """

    def __init__(self):
        # Exits with status 47 when no device is attached.
        self.averageSpineX = 0
        self.fn = Freenect2()
        if self.fn.enumerateDevices() == 0:
            sys.exit(47)
        self.serial = self.fn.getDeviceSerialNumber(0)
        # Listen to color, IR and depth streams.
        types = 0
        types |= FrameType.Color
        types |= (FrameType.Ir | FrameType.Depth)
        self.listener = SyncMultiFrameListener(types)
        self.logger = createConsoleLogger(LoggerLevel.Debug)
        self.device = self.fn.openDevice(self.serial)
        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)
        # Working buffers for registration.
        self.undistorted = Frame(512, 424, 4)
        self.registered = Frame(512, 424, 4)

    def valueBounded(self, checkValue, absoluteValue):
        # Intended: True when checkValue lies within +/- absoluteValue.
        # NOTE(review): the chained comparison evaluates as
        # (-absoluteValue <= checkValue) AND (checkValue >= absoluteValue);
        # the band check was probably meant to end with `<= absoluteValue`.
        if ((-absoluteValue <= checkValue >= absoluteValue)):
            return True
        else:
            return False

    def valueUnbounded(self, checkValue, absoluteValue):
        # True when checkValue lies outside +/- absoluteValue.
        if ((checkValue > absoluteValue) | (checkValue < -absoluteValue)):
            return True
        else:
            return False

    def valuePlusMinusDifferential(self, checkValue, testValue, Differential):
        # True when checkValue is strictly within +/- Differential of testValue.
        if((checkValue < testValue + Differential) & (checkValue > testValue - Differential)):
            return True
        else:
            return False

    def update(self, registration):
        """Grab one frame set, register depth onto color, and return the
        undistorted depth image as a float32 array."""
        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)
        frames = self.listener.waitForNewFrame()
        depth = frames["depth"]
        color = frames["color"]
        # NOTE(review): the array is taken before apply() fills the buffer --
        # verify asarray() returns a live view rather than a copy.
        d = self.undistorted.asarray(dtype=np.float32)
        registration.apply(color, depth, self.undistorted, self.registered)
        self.listener.release(frames)
        return d

    # calculate the average and skeleton points of a depth array, those
    # values plus the depth array
    # calculate mean depth of depth array, used to find skeleton points.
    def getMeanDepth(self, depth, rows, cols):
        total = 0
        sumDepth = 0
        for row in range(rows):
            for col in range(cols):
                try:
                    # NOTE(review): computing an int cannot raise IndexError;
                    # the guard presumably belongs on the depth[offset] access.
                    offset = row + col * (rows)
                except IndexError as e:
                    print e
                #if depth[offset]['depth'] < DANGER_THRESHOLD:
                total += 1
                sumDepth += depth[offset]['depth']
        return (sumDepth / total)

    # calculate the skeleton points of the depth array
    def getSkeleton(self, depthArray, average, rows, cols):
        # Bounding box of all points; averageX is its horizontal midpoint.
        topY = 0
        leftX = rows
        bottomY = cols
        rightX = 0
        for d in depthArray:
            #print depthArray[offset]
            if (d['x'] > rightX):
                rightX = d['x']
            if (d['x'] < leftX):
                leftX = d['x']
            if (d['y'] < bottomY):
                bottomY = d['y']
            if (d['y'] > topY):
                topY = d['y']
        averageX = (leftX + rightX) / 2
        returnValues = {'averageX': averageX, 'topY': topY, 'bottomY': bottomY,
                        'rightX': rightX, 'leftX': leftX}
        return returnValues

    def changeInX(self, spine):
        # Maintain the rolling average spine X; large jumps trigger the
        # drowning check.
        print "changeInX"
        if self.averageSpineX == 0:
            self.averageSpineX = spine['averageX']
        elif (self.valueBounded((self.averageSpineX - spine['averageX']), X_CHANGE_THRESHOLD)):
            self.averageSpineX = ((self.averageSpineX + spine['averageX']) / 2)
        else:
            self.checkDrowning()

    # Check to see if the difference between the averageSpineX and the last
    # analyzed averageX is less below a threshold. If it is, the loop
    # continues. If it runs for 20 seconds, it will set the drowning flag to
    # true. If falsePositive gets to be too high, DORi will no longer
    # assume the victim is a drowning risk
    def checkDrowning(self):
        print "checkDrowning"
        drowningRisk = True
        drowning = False
        falsePositive = 0
        # 20 seconds from start of def
        # timeLimit = time.time() + DROWN_TIME
        # NOTE(review): timeLimit is commented out above but used below --
        # as written the loop raises NameError on its first iteration.
        self.device.start()
        while drowningRisk:
            print 'checking...'
            if time.time() > timeLimit:
                drowningRisk = False
                drowning = True
            data = self.fullDataLoop()
            if (self.valueUnbounded((self.averageSpineX - data['spine']['averageX']), X_CHANGE_THRESHOLD)):
                falsePositive += 1
                if falsePositive > 100:
                    drowningRisk = False
            else:
                continue
        if drowning:
            self.device.stop()
            print "This guy is for sure drowning"
            sys.exit(47)
        self.device.stop()

    def depthMatrixToPointCloudPos2(self, depthArray):
        # Convert a depth image to an (N, 3) stack using camera intrinsics
        # from KINECT_SPECS.
        # NOTE(review): R and C are the image dimensions (plain ints), not
        # coordinate grids -- this looks like it intended np.indices/meshgrid;
        # verify against the original source.
        R, C = depthArray.shape
        R -= KINECT_SPECS['cx']
        R *= depthArray
        R /= KINECT_SPECS['fx']
        C -= KINECT_SPECS['cy']
        C *= depthArray
        C /= KINECT_SPECS['fy']
        return np.column_stack((depthArray.ravel(), R.ravel(), C.ravel()))

    def getBody(self, depthArray, average, rows, cols):
        # Keep points whose depth is close to the mean depth (the "body").
        body = []
        for d in depthArray:
            if self.valuePlusMinusDifferential(d['depth'], average, BODY_DEPTH_THRESHOLD):
                body.append(d)
        return body

    def pointRavel(self, unraveledArray, rows, cols):
        # Flatten a 2-D depth image into a list of {x, y, depth} records.
        raveledArray = []
        d = unraveledArray.ravel()
        for row in range(rows):
            for col in range(cols):
                try:
                    offset = row + col * (rows)
                    raveledArray.append({'x': row, 'y': col, 'depth': d[offset]})
                except IndexError as e:
                    print e
        return raveledArray

    def fullDataLoop(self):
        # One full capture + analysis pass; returns the skeleton ("spine")
        # and the mean depth.
        registration = Registration(self.device.getIrCameraParams(),
                                    self.device.getColorCameraParams())
        # Hard-coded test skeleton fed to getSkeleton instead of live data.
        deptharraytest = [{'x': 152, 'y': 100, 'depth': 400}, {'x': 300, 'y': 200, 'depth': 400},
                          {'x': 350, 'y': 150, 'depth': 400}, {'x': 400, 'y': 300, 'depth': 400},
                          {'x': 22, 'y': 10, 'depth': 400}, {'x': 144, 'y': 12, 'depth': 400}]
        depthArray = self.update(registration)
        rows, cols = depthArray.shape
        d = self.pointRavel(depthArray, rows, cols)
        m = self.getMeanDepth(d, rows, cols)
        b = self.getBody(d, m, rows, cols)
        s = self.getSkeleton(deptharraytest, m, cols, rows)
        return {'spine' : s, 'meanDepth' : m}

    def run(self, duration):
        """Capture and analyse frames for `duration` seconds, tracking
        spine movement via changeInX()."""
        end = time.time() + duration
        self.device.start()
        registration = Registration(self.device.getIrCameraParams(),
                                    self.device.getColorCameraParams())
        deptharraytest = [{'x':152, 'y':100, 'depth':400}, {'x':300, 'y':200, 'depth':400},
                          {'x': 350, 'y': 150, 'depth': 400},{'x':400, 'y':300, 'depth':400},
                          {'x': 22, 'y': 10, 'depth': 400},{'x':144, 'y':12, 'depth':400}]
        while time.time() < end:
            depthArray = self.update(registration)
            rows, cols = depthArray.shape
            d = self.pointRavel(depthArray, rows, cols)
            m =self.getMeanDepth(d, rows, cols)
            b = self.getBody(d, m, rows, cols)
            s = self.getSkeleton(deptharraytest, m, cols, rows)
            self.changeInX(s)
        self.device.stop()

    def execute(self):
        # NOTE(review): this method calls getMeanDepth/getBody/getSkeleton
        # with the wrong arity and prints body['z'], which pointRavel never
        # sets -- it looks stale/broken relative to run().
        self.device.start()
        registration = Registration(self.device.getIrCameraParams(),
                                    self.device.getColorCameraParams())
        d = self.update(registration)
        shape = d.shape
        m = self.getMeanDepth(d, 9)
        b = self.getBody(d,m)
        s = self.getSkeleton(d, m)
        self.changeInX(s)
        print 'depthArray' + str(len(d.ravel()))
        print 'body' + str(len(b))
        for body in b:
            print "(" + str(body['x']) + "," + str(body['y']) + "): " + str(body['z'])
        print m
        self.device.stop()

    def exit(self):
        # Shut the device down cleanly.
        self.device.stop()
        self.device.close()
class VideoCapture:
    """Video-source abstraction over Kinect v1 (libfreenect) and Kinect v2
    (pylibfreenect2) RGB streams.  Written for Python 2 (print statements)."""

    def __init__(self, video_source):
        # video_source: 'kinectv1' or 'kinectv2'.
        self.video_source = video_source
        if video_source == 'kinectv2':
            self.listener = None
            self.device = None
            self.fn = None
            self.frames = None  # last acquired frame set; released on the next grab
            self.startKinectV2Dev()

    def startKinectV2Dev(self):
        """Open the first Kinect v2 and start streaming RGB frames.

        Exits the process (sys.exit(1)) when no device is connected.
        """
        from pylibfreenect2 import Freenect2, SyncMultiFrameListener, FrameType
        # Prefer GPU packet pipelines; fall back to CPU.
        # NOTE(review): bare `except:` also catches KeyboardInterrupt.
        try:
            from pylibfreenect2 import OpenGLPacketPipeline
            pipeline = OpenGLPacketPipeline()
        except:
            try:
                from pylibfreenect2 import OpenCLPacketPipeline
                pipeline = OpenCLPacketPipeline()
            except:
                from pylibfreenect2 import CpuPacketPipeline
                pipeline = CpuPacketPipeline()
        print("LIBFREENECT2::Packet pipeline:", type(pipeline).__name__)
        self.fn = Freenect2()
        num_devices = self.fn.enumerateDevices()
        if num_devices == 0:
            print("No device connected!")
            sys.exit(1)
        serial = self.fn.getDeviceSerialNumber(0)
        self.device = self.fn.openDevice(serial, pipeline=pipeline)
        self.listener = SyncMultiFrameListener(
            FrameType.Color)  # listening only to rgb frames
        # Register listeners
        self.device.setColorFrameListener(self.listener)
        self.device.start()

    def stopKinectV2Dev(self):
        # Stop streaming and release the Kinect v2 device.
        self.device.stop()
        self.device.close()

    def stopVideoFeed(self):
        if self.video_source == 'kinectv2':
            self.stopKinectV2Dev()

    def getKinectV1RgbFrame(self):
        # Kinect v1 via libfreenect's synchronous API.
        import freenect
        array, _ = freenect.sync_get_video()
        rgb_frame = cv2.cvtColor(array, cv2.COLOR_RGB2BGR)
        return rgb_frame

    def getKinectV2RgbFrame(self):
        # Blocks until the next frame set arrives; caller must release it.
        frames = self.listener.waitForNewFrame()
        return frames

    def getRgbFrame(self):
        rgb_frame = None
        if self.video_source == 'kinectv1':
            import freenect
            rgb_frame = self.getKinectV1RgbFrame()
        elif self.video_source == 'kinectv2':
            from pylibfreenect2 import Freenect2, SyncMultiFrameListener, FrameType
            rgb_frame = self.getKinectV2RgbFrame()
        return rgb_frame

    def getNextFrame(self):
        """Return the next frame from the active source, Gaussian-blurred;
        returns None when there is no configured video feed."""
        print "video source ", self.video_source
        if self.video_source == 'kinectv1':
            rgb_frame = self.getKinectV1RgbFrame()
        elif self.video_source == 'kinectv2':
            # Release the previous frame set before grabbing a new one.
            if self.frames:
                self.listener.release(self.frames)
            self.frames = self.getKinectV2RgbFrame()
            resised_rgb_frame = cv2.resize(self.frames["color"].asarray(), (int(1920), int(1080)))
            # Mirror horizontally.
            rgb_frame = resised_rgb_frame[:, ::-1, :]
        else:
            print "no video feed"
            return
        frame = cv2.GaussianBlur(rgb_frame, (7, 7), 0)
        return frame

    def launchVideoForColorCalib(self):
        """Show the live feed and install a mouse callback on its HSV
        conversion until 'q' is pressed."""
        while (1):
            frame = self.getNextFrame()
            # Convert the image to hsv space and find range of colors
            #hsvFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            # self.detectReward(frame, hsvFrame)
            # Show the original and processed feed
            hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            cv2.imshow('Original feed', frame)
            cv2.setMouseCallback('Original feed', self.onMouseClick, hsv_frame)
            # cv2.imshow('Original feed', hsv_frame)
            # if 'q' is pressed then exit the loop
            if cv2.waitKey(33) == ord('q'):
                break
        # Destroy all windows exit the program
        cv2.destroyAllWindows()
os.remove(f) # Create and set logger logger = createConsoleLogger(LoggerLevel.Debug) setGlobalLogger(logger) fn = Freenect2() num_devices = fn.enumerateDevices() if num_devices == 0: print("No device connected!") sys.exit(1) serial = fn.getDeviceSerialNumber(0) device = fn.openDevice(serial, pipeline=pipeline) listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir | FrameType.Depth) # Register listeners device.setColorFrameListener(listener) device.setIrAndDepthFrameListener(listener) device.start() # NOTE: must be called after device.start() registration = Registration(device.getIrCameraParams(), device.getColorCameraParams()) h, w = 512, 424 FOVX = 1.232202 #horizontal FOV in radians focal_x = device.getIrCameraParams().fx #focal length x focal_y = device.getIrCameraParams().fy #focal length y
# Module-level setup: CPU packet pipeline (no GPU decode), depth-only streams.
from pylibfreenect2 import CpuPacketPipeline
pipeline = CpuPacketPipeline()
print("Packet pipeline:", type(pipeline).__name__)

fn = Freenect2()
num_devices = fn.enumerateDevices()
if num_devices == 0:
    print("No device connected!")
    sys.exit(1)

serial = fn.getDeviceSerialNumber(0)
device = fn.openDevice(serial, pipeline=pipeline)

# IR + depth only; the RGB stream stays off.
types = 0
types |= (FrameType.Ir | FrameType.Depth)
listener = SyncMultiFrameListener(types)
device.setIrAndDepthFrameListener(listener)
device.startStreams(rgb=False, depth=True)

#registration = Registration(device.getIrCameraParams(),device.getColorCameraParams())

def estimate_coef(x, y):
    """Least-squares linear-regression coefficient helper.

    NOTE(review): the function appears truncated here (no return statement
    in view).  The sums also look wrong: np.sum(y * x - n * m_y * m_x)
    equals sum(xy) - n**2 * m_x * m_y, whereas the textbook cross-deviation
    is sum(xy) - n * m_x * m_y -- likely lost parentheses; verify against
    the original source.
    """
    # number of observations/points
    n = np.size(x)
    # mean of x and y vector
    m_x, m_y = np.mean(x), np.mean(y)
    # calculating cross-deviation and deviation about x
    SS_xy = np.sum(y * x - n * m_y * m_x)
    SS_xx = np.sum(x * x - n * m_x * m_x)
def find_and_track_kinect(name, tracker = "CSRT",
        min_range = 0, max_range = 1700,
        face_target_box = DEFAULT_FACE_TARGET_BOX,
        res = (RGB_W, RGB_H),
        video_out = True, debug = True):
    """Locate `name` by face recognition in the Kinect color stream, then follow
    them with an OpenCV tracker, periodically re-correcting from face detections.

    Runs until 'q' is pressed; draws overlays when `video_out` is True.
    """
    known_faces = {}

    # Build one reference encoding per enrolled person (`faces` is a module-level
    # name -> image-path mapping defined elsewhere in the file).
    for person in faces:
        image = face_recognition.load_image_file(faces[person])
        print(person)
        face_encoding_list = face_recognition.face_encodings(image)
        if len(face_encoding_list) > 0:
            known_faces[person] = face_encoding_list[0]
        else:
            print("\t Could not find face for person...")

    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        # NOTE(review): execution falls through and openDevice() will fail on
        # index 0 — sibling snippets sys.exit(1) here; consider an early return.
        print("No device connected!")

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline = pipeline)

    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Depth)

    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)
    device.start()

    registration = Registration(device.getIrCameraParams(),
            device.getColorCameraParams())

    # Working buffers: depth-space frames plus full-HD "bigdepth" (1920x1082).
    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)
    bigdepth = Frame(1920, 1082, 4)

    trackerObj = None
    face_count = 5          # start >= FACE_COUNT so the first confirmed face seeds the tracker
    face_process_frame = True  # run face recognition only every other frame (it is slow)

    bbox = None
    face_bbox = None
    track_bbox = None

    while True:
        timer = cv2.getTickCount()

        person_found = False

        frames = listener.waitForNewFrame()

        color = frames["color"]
        depth = frames["depth"]

        # Map depth into color space so bigdepth aligns with the RGB image.
        registration.apply(color, depth, undistorted, registered, bigdepth=bigdepth)

        bd = np.resize(bigdepth.asarray(np.float32), (1080, 1920))
        c = cv2.cvtColor(color.asarray(), cv2.COLOR_RGB2BGR)

        # Blank out pixels outside [min_range, max_range] mm (helper defined elsewhere).
        __clean_color(c, bd, min_range, max_range)

        if face_process_frame:
            small_c = __crop_frame(c, face_target_box)
            face_locations = face_recognition.face_locations(small_c,
                    model="cnn")
            face_encodings = face_recognition.face_encodings(small_c,
                    face_locations)
            for face_encoding in face_encodings:
                matches = face_recognition.compare_faces(
                        [known_faces[name]], face_encoding, 0.6)
                if len(matches) > 0 and matches[0]:
                    person_found = True
                    face_count += 1
                    (top, right, bottom, left) = face_locations[0]

                    # Shift from cropped-frame coords back to full-frame coords.
                    left += face_target_box[0]
                    top += face_target_box[1]
                    right += face_target_box[0]
                    bottom += face_target_box[1]

                    face_bbox = (left, top, right, bottom)
                    person_found = True
                    break
        face_process_frame = not face_process_frame

        # How much does the current track box still overlap the last face box?
        overlap_pct = 0
        track_area = __bbox_area(track_bbox)
        if track_area > 0 and face_bbox:
            overlap_area = __bbox_overlap(face_bbox, track_bbox)
            overlap_pct = min(overlap_area / __bbox_area(face_bbox),
                    overlap_area / __bbox_area(track_bbox))

        # Re-seed the tracker from the face detection when the track has drifted.
        if person_found and face_count >= FACE_COUNT and overlap_pct < CORRECTION_THRESHOLD:
            bbox = (face_bbox[0], face_bbox[1],
                    face_bbox[2] - face_bbox[0],
                    face_bbox[3] - face_bbox[1])  # (x, y, w, h) for OpenCV trackers
            trackerObj = __init_tracker(c, bbox, tracker)
            face_count = 0

        status = False
        if trackerObj is not None:
            status, trackerBBox = trackerObj.update(c)
            # Convert (x, y, w, h) back to (left, top, right, bottom).
            bbox = (int(trackerBBox[0]),
                    int(trackerBBox[1]),
                    int(trackerBBox[0] + trackerBBox[2]),
                    int(trackerBBox[1] + trackerBBox[3]))

        if bbox is not None:
            track_bbox = bbox

        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        w = 0
        h = 0
        if status:
            # Center of the tracked box; used to sample the aligned depth map.
            w = track_bbox[0] + int((track_bbox[2] - track_bbox[0])/2)
            h = track_bbox[1] + int((track_bbox[3] - track_bbox[1])/2)

            if (w < res[0] and w >= 0 and h < res[1] and h >= 0):
                distanceAtCenter = bd[h][w]
                __update_individual_position("NONE", track_bbox, distanceAtCenter, res)

        if video_out:
            # Crosshair through the tracked center plus all bounding boxes.
            cv2.line(c, (w, 0), (w, int(res[1])), (0,255,0), 1)
            cv2.line(c, (0, h), \
                    (int(res[0]), h), (0,255,0), 1)

            __draw_bbox(True, c, face_target_box, (255, 0, 0), "FACE_TARGET")
            __draw_bbox(status, c, track_bbox, (0, 255, 0), tracker)
            __draw_bbox(person_found, c, face_bbox, (0, 0, 255), name)

            c = __scale_frame(c, scale_factor = 0.5)

            cv2.putText(c, "FPS : " + str(int(fps)), (100,50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,255), 1)
            if not status:
                failedTrackers = "FAILED: "
                failedTrackers += tracker + " "
                cv2.putText(c, failedTrackers, (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,142), 1)

            cv2.imshow("color", c)

        listener.release(frames)

        key = cv2.waitKey(1) & 0xff
        if key == ord('q'):
            break

    device.stop()
    device.close()
class Tracker:
    """Track a green ball via a camera or Kinect v2 and compute a reward signal.

    A background thread segments the ball by HSV color and records its
    (y, z) trajectory (Kinect mode) or the minimal image row reached
    (plain-camera mode).
    """

    # X_POSITION = -0.11387496
    # Z_POSITION = 1.3
    X_POSITION = -0.185   # target x (meters) used by the basket-distance reward
    Z_POSITION = 1.5      # target z (meters) used by the basket-distance reward
    # NOTE(review): the first greenLower/greenUpper pair is immediately
    # overwritten by the second — only (27,100,50)/(60,255,255) takes effect.
    greenLower = (29, 86, 6)
    greenUpper = (64, 255, 255)
    greenLower = (27, 100, 50)
    greenUpper = (60, 255, 255)

    def __init__(self, use_kinect=False, basket=False, gui=True):
        # use_kinect: segment on registered Kinect frames instead of a webcam
        # basket:     reward = distance to (X_POSITION, Z_POSITION) instead of peak height
        # gui:        show debug windows
        self.connected = False
        self.max_radius = 100000
        self.running = False
        self.killed = False
        self.gui = gui
        self.basket = basket
        self.use_kinect = use_kinect
        # self.fig, self.ax = plt.subplots()
        if self.use_kinect:
            self.px = []   # recorded y coordinates (meters)
            self.py = []   # recorded z coordinates (meters)
            logger = createConsoleLogger(LoggerLevel.Error)
            setGlobalLogger(logger)
        while not self.connected:  # Bad, but needed
            if self.use_kinect:
                self.connect_kinect()
            else:
                self.connect_camera()
        self.thread = threading.Thread(target=self.video_thread)
        self.thread.start()

    def connect_camera(self):
        # Bad, but needed
        self.camera = cv2.VideoCapture(1)
        if not self.camera.isOpened():
            print("Unable to connect to the camera, check again")
            time.sleep(5)
        else:
            self.connected = True

    def connect_kinect(self):
        # Bad, but needed
        self.fn = Freenect2()
        num_devices = self.fn.enumerateDevices()
        if num_devices == 0:
            print("Kinect not found, check again")
            self.connected = False
            return
        # NOTE(review): bare except — swallows everything incl. KeyboardInterrupt;
        # prefer `except Exception:` here.
        try:
            self.pipeline = OpenGLPacketPipeline()
        except:
            self.pipeline = CpuPacketPipeline()
        serial = self.fn.getDeviceSerialNumber(0)
        self.device = self.fn.openDevice(serial, pipeline=self.pipeline)
        self.listener = SyncMultiFrameListener(
            FrameType.Color | FrameType.Ir | FrameType.Depth)
        # Register listeners
        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)
        self.device.start()
        print("started")
        # NOTE: must be called after device.start()
        self.registration = Registration(self.device.getIrCameraParams(),
                                         self.device.getColorCameraParams())
        print("registration")
        self.undistorted = Frame(512, 424, 4)
        self.registered = Frame(512, 424, 4)
        self.connected = True

    def video_thread(self):
        """Frame loop: segment the green ball, record its trajectory while running."""
        print('Camera thread started')
        need_bigdepth = False
        need_color_depth_map = False
        bigdepth = Frame(1920, 1082, 4) if need_bigdepth else None
        color_depth_map = np.zeros((424, 512), np.int32).ravel() \
            if need_color_depth_map else None
        while not self.killed:
            while self.running:
                # (grabbed, frame) = self.camera.read()
                frames = self.listener.waitForNewFrame()
                frame = frames["color"]
                ir = frames["ir"]
                depth = frames["depth"]
                self.registration.apply(frame, depth,
                                        self.undistorted, self.registered,
                                        bigdepth=bigdepth,
                                        color_depth_map=color_depth_map)
                # print(frame.width, frame.height, frame.bytes_per_pixel)
                # print(ir.width, ir.height, ir.bytes_per_pixel)
                # print(depth.width, depth.height, depth.bytes_per_pixel)
                # print(frame.asarray().shape, ir.asarray().shape, depth.asarray().shape)
                # color_image_array = frame.asarray()
                color_image_array = self.registered.asarray(np.uint8)
                hsv = cv2.cvtColor(color_image_array, cv2.COLOR_BGR2HSV)
                mask = cv2.inRange(hsv, self.greenLower, self.greenUpper)
                mask = cv2.erode(mask, None, iterations=2)
                mask = cv2.dilate(mask, None, iterations=2)
                # [-2] keeps this compatible with both OpenCV 3 and 4 return shapes
                cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)[-2]
                center = None
                if len(cnts) > 0:
                    c = max(cnts, key=cv2.contourArea)
                    ((x, y), radius) = cv2.minEnclosingCircle(c)
                    M = cv2.moments(c)
                    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
                    if radius > 10:
                        cv2.circle(color_image_array, (int(x), int(y)),
                                   int(radius), (0, 255, 255), 2)
                        cv2.circle(color_image_array, center, 5, (0, 0, 255), -1)
                        # Note argument order: getPointXYZ(frame, row, col)
                        x, y, z = self.registration.getPointXYZ(self.undistorted, y, x)
                        # print(x, y, z, end='\r')
                        if not math.isnan(y) and not math.isnan(z):
                            self.px.append(y)
                            self.py.append(z)
                    # Topmost contour row — proxy for throw height in camera mode.
                    # NOTE(review): indentation reconstructed; original nesting of
                    # these two lines relative to `if radius > 10` is ambiguous.
                    row = tuple(c[c[:, :, 1].argmin()][0])[1]
                    self.max_radius = min(self.max_radius, row)
                if self.gui:
                    cv2.imshow("Frame", color_image_array)
                    cv2.imshow("Masked", mask)
                self.listener.release(frames)
                cv2.waitKey(1) & 0xFF
                # self.ax.plot(self.px)
                # self.ax.plot(self.py)
                # plt.pause(0.01)
        cv2.destroyAllWindows()
        print("exit looping")

    def start(self):
        """Reset recorded trajectory and resume capture."""
        self.max_radius = 100000
        if self.use_kinect:
            self.px = []
            self.py = []
        self.running = True

    def stop(self):
        """Pause capture (thread keeps spinning until kill())."""
        self.running = False

    def kill(self):
        """Terminate the capture thread and release the Kinect."""
        self.killed = True
        if self.use_kinect:
            self.device.stop()
            self.device.close()

    def get_reward(self):
        """Reward: peak height (cm), basket distance (cm), or min row reached."""
        if self.use_kinect:
            if not self.basket:
                try:
                    peaks = peakutils.indexes(self.px)
                except Exception as e:
                    print(e)
                    peaks = np.array([])
                if peaks.any():
                    # for xx in peaks:
                    #     t.ax.plot(xx, self.px[xx], 'ro')
                    return self.py[peaks[0]] * 100
                else:
                    return 0
            else:
                print(np.mean(self.px), np.mean(self.py))
                dist = distance(np.array([self.px, self.py]).T,
                                self.X_POSITION, self.Z_POSITION)
                print("distance", dist, len(self.px))
                return dist * 100
        else:
            return self.max_radius
print("Packet pipeline:", type(pipeline).__name__) # Create and set logger logger = createConsoleLogger(LoggerLevel.Debug) setGlobalLogger(logger) fn = Freenect2() num_devices = fn.enumerateDevices() if num_devices == 0: print("No device connected!") sys.exit(1) serial = fn.getDeviceSerialNumber(0) device = fn.openDevice(serial, pipeline=pipeline) listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir | FrameType.Depth) # Register listeners device.setColorFrameListener(listener) device.setIrAndDepthFrameListener(listener) device.start() # NOTE: must be called after device.start() registration = Registration(device.getIrCameraParams(), device.getColorCameraParams()) undistorted = Frame(512, 424, 4) registered = Frame(512, 424, 4) # Optinal parameters for registration
def get_image(imgtopic):
    """Grab one color frame from the first attached Kinect v2.

    Parameters
    ----------
    imgtopic : unused; kept so existing callers keep working.

    Returns
    -------
    numpy.ndarray of shape (1080, 1920, 4), uint8 BGRX — an owned copy,
    safe to use after the device is shut down.

    Raises
    ------
    AssertionError if no Kinect is connected.
    """
    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    assert num_devices > 0

    serial = fn.getDefaultDeviceSerialNumber()
    assert serial == fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial)
    assert fn.getDefaultDeviceSerialNumber() == device.getSerialNumber()
    device.getFirmwareVersion()

    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    # frames as a first argument also should work
    frames = FrameMap()
    listener.waitForNewFrame(frames)
    try:
        # Copy before release: asarray() is a view into a buffer that the
        # listener frees on release(); the original leaked the frame AND
        # returned a view into it.
        image = frames[FrameType.Color].asarray().copy()
    finally:
        # Always release the frame and shut the device down, even on error —
        # the original never released frames nor stopped/closed the device.
        listener.release(frames)
        device.stop()
        device.close()

    return image
def test_sync_multi_frame():
    """End-to-end smoke test of the synchronous multi-frame pipeline.

    Requires a physical Kinect v2. Verifies frame metadata, registration,
    and that type-less frames refuse asarray() without an explicit dtype.
    """
    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    assert num_devices > 0

    serial = fn.getDefaultDeviceSerialNumber()
    assert serial == fn.getDeviceSerialNumber(0)

    # TODO: tests for openDefaultDevice
    # device = fn.openDefaultDevice()
    device = fn.openDevice(serial)
    assert fn.getDefaultDeviceSerialNumber() == device.getSerialNumber()
    device.getFirmwareVersion()

    listener = SyncMultiFrameListener(
        FrameType.Color | FrameType.Ir | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    # Registration
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())
    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    # test if we can get two frames at least
    frames = listener.waitForNewFrame()
    listener.release(frames)
    frames = listener.waitForNewFrame()

    color = frames[FrameType.Color]
    ir = frames[FrameType.Ir]
    depth = frames[FrameType.Depth]

    # IR/depth frames carry no camera-exposure metadata; color does.
    for frame in [ir, depth]:
        assert frame.exposure == 0
        assert frame.gain == 0
        assert frame.gamma == 0
    for frame in [color]:
        assert frame.exposure > 0
        assert frame.gain > 0
        assert frame.gamma > 0

    registration.apply(color, depth, undistorted, registered)

    ### Color ###
    assert color.width == 1920
    assert color.height == 1080
    assert color.bytes_per_pixel == 4

    assert ir.width == 512
    assert ir.height == 424
    assert ir.bytes_per_pixel == 4

    assert depth.width == 512
    assert depth.height == 424
    assert depth.bytes_per_pixel == 4

    assert color.asarray().shape == (color.height, color.width, 4)
    assert ir.asarray().shape == (ir.height, ir.width)
    # FIX: Frame has no .astype(); the sibling tests use asarray(np.float32).
    assert depth.asarray(np.float32).shape == (depth.height, depth.width)

    listener.release(frames)

    def __test_cannot_determine_type_of_frame(frame):
        frame.asarray()

    # Frames constructed without a FrameType cannot infer a dtype.
    for frame in [registered, undistorted]:
        yield raises(ValueError)(__test_cannot_determine_type_of_frame), frame

    device.stop()
    device.close()
fn = Freenect2()
num_devices = fn.enumerateDevices()
if num_devices == 0:
    print("No device connected!")
    sys.exit(1)

serial = fn.getDeviceSerialNumber(0)
# `pipeline` is chosen earlier in the file, outside this chunk.
device = fn.openDevice(serial, pipeline=pipeline)

# Build the frame-type bitmask from flags defined earlier in the file
# (enable_rgb / enable_depth — presumably CLI or config driven; confirm).
types = 0
if enable_rgb:
    types |= FrameType.Color
if enable_depth:
    types |= (FrameType.Ir | FrameType.Depth)
listener = SyncMultiFrameListener(types)

# Register listeners
device.setColorFrameListener(listener)
device.setIrAndDepthFrameListener(listener)

# start() runs every stream; startStreams() lets us run a subset.
if enable_rgb and enable_depth:
    device.start()
else:
    device.startStreams(rgb=enable_rgb, depth=enable_depth)

# NOTE: must be called after device.start()
if enable_depth:
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())
class KivyCamera(Image):
    """Kivy Image widget fed by a Kinect v2 color stream.

    When a background image is set (``bg_image``), pixels beyond
    ``clipping_distance`` are replaced by the background (green-screen
    effect driven by the registered depth map).
    """

    def __init__(self, **kwargs):
        super(KivyCamera, self).__init__(**kwargs)

    def setup(self, fn, pipeline, **kwargs):
        """Open device 0 of `fn` with `pipeline`, start streams, allocate buffers."""
        # NOTE(review): re-running Image.__init__ here is unusual but preserved.
        super(KivyCamera, self).__init__(**kwargs)
        self.bg_image = None            # optional replacement background (HxWx3 or HxWx4)
        self.current_img = None         # last raw color frame (BGR, 3 channels)
        self.current_final_img = None   # frame after background substitution
        self.current_mask = None        # boolean foreground mask, 3-channel

        serial = fn.getDeviceSerialNumber(0)
        self.device = fn.openDevice(serial, pipeline=pipeline)
        self.listener = SyncMultiFrameListener(
            FrameType.Color | FrameType.Ir | FrameType.Depth)

        # Register listeners
        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)
        self.device.start()

        # NOTE: must be called after device.start()
        self.registration = Registration(self.device.getIrCameraParams(),
                                         self.device.getColorCameraParams())

        self.undistorted = Frame(512, 424, 4)
        self.registered = Frame(512, 424, 4)

        # Optional parameters for registration - set True if you need
        need_bigdepth = True
        need_color_depth_map = False
        self.bigdepth = Frame(1920, 1082, 4) if need_bigdepth else None
        self.color_depth_map = np.zeros((424, 512), np.int32).ravel() \
            if need_color_depth_map else None

        self.clipping_distance = 0.5  # fraction of the 4500 mm normalization below

    def update(self, dt):
        """Per-tick callback: grab a frame, build the mask, composite, display."""
        frames = self.listener.waitForNewFrame()
        color = frames["color"]
        depth = frames["depth"]

        self.registration.apply(color, depth, self.undistorted, self.registered,
                                bigdepth=self.bigdepth,
                                color_depth_map=self.color_depth_map)

        self.current_img = color.asarray()[:, :, :3]
        # scale to interval [0,1]
        depth_img = self.bigdepth.asarray(np.float32) / 4500.

        if self.bg_image is not None:
            if self.bg_image.shape[2] == 4:
                # put frame around picture
                fgMask = self.bg_image[:, :, 2] > 0
            else:
                # modify background
                # if needed: denoise filter
                depth_img = cv2.medianBlur(depth_img, 5)
                depth_img = cv2.GaussianBlur(depth_img, (9, 9), 0)
                fgMask = ((depth_img > self.clipping_distance) & (depth_img > 0))
                fgMask = cv2.resize(fgMask.astype(np.uint8), (1920, 1080))
                # if needed: morphological operations
                #kernel = np.ones((10,10), np.uint8)
                #fgMask = cv2.morphologyEx(fgMask, cv2.MORPH_CLOSE, kernel)
                #fgMask = cv2.morphologyEx(fgMask, cv2.MORPH_OPEN, kernel)
            # FIX: np.bool was removed in NumPy >= 1.24; the builtin bool is the
            # documented replacement and is what np.bool aliased.
            # mask is 1 channel, color is 3 channels
            self.current_mask = np.dstack((fgMask, fgMask, fgMask)).astype(bool)
            self.current_final_img = np.where(self.current_mask,
                                              self.bg_image[:, :, :3],
                                              self.current_img)
        else:
            self.current_mask = None
            self.current_final_img = self.current_img

        self.listener.release(frames)
        self.display_img(self.current_final_img)

    def display_img(self, img):
        """Blit a BGR ndarray into this widget's texture (flipped for Kivy)."""
        buf1 = cv2.flip(img, 0)
        # FIX: ndarray.tostring() is deprecated (removed in NumPy 2.0);
        # tobytes() returns the identical byte string.
        buf = buf1.tobytes()
        image_texture = Texture.create(size=(img.shape[1], img.shape[0]),
                                       colorfmt='rgb')  # (480,640)
        image_texture.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')
        # display image from the texture
        self.texture = image_texture
def test_sync_multi_frame():
    """Hardware smoke test: frames, registration (with optional outputs),
    undistortDepth, and 3D point lookups (getPointXYZ / getPointXYZRGB)."""
    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    assert num_devices > 0

    serial = fn.getDefaultDeviceSerialNumber()
    assert serial == fn.getDeviceSerialNumber(0)

    device = fn.openDevice(serial)
    assert fn.getDefaultDeviceSerialNumber() == device.getSerialNumber()
    device.getFirmwareVersion()

    listener = SyncMultiFrameListener(
        FrameType.Color | FrameType.Ir | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    # Registration
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())
    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    # optional parameters for registration
    bigdepth = Frame(1920, 1082, 4)
    color_depth_map = np.zeros((424, 512), np.int32)

    # test if we can get two frames at least
    frames = listener.waitForNewFrame()
    listener.release(frames)

    # frames as a first argument also should work
    frames = FrameMap()
    listener.waitForNewFrame(frames)

    color = frames[FrameType.Color]
    ir = frames[FrameType.Ir]
    depth = frames[FrameType.Depth]

    # IR/depth frames carry no exposure metadata; color frames do.
    for frame in [ir, depth]:
        assert frame.exposure == 0
        assert frame.gain == 0
        assert frame.gamma == 0
    for frame in [color]:
        assert frame.exposure > 0
        assert frame.gain > 0
        assert frame.gamma > 0

    registration.apply(color, depth, undistorted, registered)

    # with optional parameters
    registration.apply(color, depth, undistorted, registered,
                       bigdepth=bigdepth,
                       color_depth_map=color_depth_map.ravel())

    registration.undistortDepth(depth, undistorted)

    assert color.width == 1920
    assert color.height == 1080
    assert color.bytes_per_pixel == 4

    assert ir.width == 512
    assert ir.height == 424
    assert ir.bytes_per_pixel == 4

    assert depth.width == 512
    assert depth.height == 424
    assert depth.bytes_per_pixel == 4

    assert color.asarray().shape == (color.height, color.width, 4)
    assert ir.asarray().shape == (ir.height, ir.width)
    assert depth.asarray(np.float32).shape == (depth.height, depth.width)

    listener.release(frames)

    def __test_cannot_determine_type_of_frame(frame):
        frame.asarray()

    # Frames created without a FrameType cannot infer a dtype (nose-style yield test).
    for frame in [registered, undistorted]:
        yield raises(ValueError)(__test_cannot_determine_type_of_frame), frame

    # getPointXYZ
    x, y, z = registration.getPointXYZ(undistorted, 512 // 2, 424 // 2)
    if not np.isnan([x, y, z]).any():
        assert z > 0

    # getPointXYZRGB
    x, y, z, b, g, r = registration.getPointXYZRGB(undistorted, registered,
                                                   512 // 2, 424 // 2)
    if not np.isnan([x, y, z]).any():
        assert z > 0
        assert np.isfinite([b, g, r]).all()
        for pix in [b, g, r]:
            assert pix >= 0 and pix <= 255

    device.stop()
    device.close()
class Kinect:
    """Thin wrapper around one pylibfreenect2 Kinect v2 device.

    Lifecycle: __init__ opens the device, start() begins streaming and builds
    the Registration, stop()/close() shut it down. get_current_* methods block
    for the next frame.
    """

    def __init__(self, kinect_num=0):
        self.fn = Freenect2()
        self.serial = None
        self.device = None
        self.listener = None
        self.registration = None

        self._frames = None  # frames cache so that the user can use them before we free them
        self._bigdepth = Frame(1920, 1082, 4)  # malloc'd
        self._undistorted = Frame(512, 424, 4)
        self._registered = Frame(512, 424, 4)
        self._color_dump = Frame(1920, 1080, 4)

        num_devices = self.fn.enumerateDevices()
        if num_devices <= kinect_num:
            raise ConnectionError("No Kinect device at index %d" % kinect_num)

        self.serial = self.fn.getDeviceSerialNumber(kinect_num)
        # `pipeline` is a module-level name selected elsewhere in the file.
        self.device = self.fn.openDevice(self.serial, pipeline=pipeline)

        self.listener = SyncMultiFrameListener(FrameType.Color | FrameType.Depth)

        # Register listeners
        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)

    def close(self):
        if self.device:
            self.device.close()

    def start(self):
        """Start streaming; Registration must be built after device.start()."""
        if self.device:
            self.device.start()
            self.registration = Registration(
                self.device.getIrCameraParams(), self.device.getColorCameraParams())
            return self  # for convenience
        else:
            raise ConnectionError("Connection to Kinect wasn't established")

    def stop(self):
        if self.device:
            self.device.stop()

    ### If copy is False for these functoins, make sure to call release_frames
    ### when you're done with the returned frame ###

    def get_current_color_frame(self, copy=True):
        """Next color frame as RGB ndarray; a copy unless copy=False."""
        self._frames = self.listener.waitForNewFrame()

        ret = self._convert_color_frame(self._frames["color"], copy=copy)

        if copy:
            self.release_frames()
        return ret

    def get_current_depth_frame(self, copy=True):
        """Next raw depth frame (512x424 float mm); a copy unless copy=False."""
        self._frames = self.listener.waitForNewFrame()

        if copy:
            ret = self._frames["depth"].asarray().copy()
            self.release_frames()
        else:
            ret = self._frames["depth"].asarray()
        return ret

    def get_current_rgbd_frame(self, copy=True):
        """Next (color, bigdepth) pair registered into color space."""
        self._frames = self.listener.waitForNewFrame()

        self.registration.apply(self._frames["color"], self._frames["depth"],
                                self._undistorted, self._registered,
                                bigdepth=self._bigdepth)

        color = self._convert_color_frame(self._frames["color"], copy=copy)
        depth = self._convert_bigdepth_frame(self._bigdepth, copy=copy)

        if copy:
            self.release_frames()
        return color, depth

    def get_current_bigdepth_frame(self, copy=True):
        """Next depth frame upscaled/registered to 1920x1080 color space."""
        self._frames = self.listener.waitForNewFrame()

        self.registration.apply(self._frames["color"], self._frames["depth"],
                                self._undistorted, self._registered,
                                bigdepth=self._bigdepth)

        depth = self._convert_bigdepth_frame(self._bigdepth, copy=copy)

        if copy:
            self.release_frames()
        return depth

    def release_frames(self):
        self.listener.release(self._frames)

    # single frame calls
    def get_single_color_frame(self):
        self.start()
        ret = self.get_current_color_frame()
        self.stop()
        return ret

    def get_single_depth_frame(self):
        self.start()
        ret = self.get_current_depth_frame()
        self.stop()
        return ret

    def _convert_bigdepth_frame(self, frame, copy=True):
        # NOTE(review): `frame` is ignored; this always reads self._bigdepth.
        if copy:
            d = self._bigdepth.asarray(np.float32).copy()
        else:
            d = self._bigdepth.asarray(np.float32)
        # Trim the 1082-row buffer to 1080 and mirror horizontally.
        return d[1:-1, ::-1]

    def _convert_color_frame(self, frame, copy=True):
        if copy:
            img = frame.asarray().copy()
        else:
            img = frame.asarray()
        img = img[:, ::-1]           # mirror horizontally
        img[..., :3] = img[..., 2::-1]  # bgrx -> rgbx
        return img
def test_sync_multi_frame():
    """Hardware smoke test: frame metadata, registration with optional
    bigdepth/color_depth_map outputs, and dtype enforcement on raw Frames."""
    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    assert num_devices > 0

    serial = fn.getDefaultDeviceSerialNumber()
    assert serial == fn.getDeviceSerialNumber(0)

    device = fn.openDevice(serial)
    assert fn.getDefaultDeviceSerialNumber() == device.getSerialNumber()
    device.getFirmwareVersion()

    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    # Registration
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())
    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    # optional parameters for registration
    bigdepth = Frame(1920, 1082, 4)
    color_depth_map = np.zeros((424, 512), np.int32)

    # test if we can get two frames at least
    frames = listener.waitForNewFrame()
    listener.release(frames)

    # frames as a first argument also should work
    frames = FrameMap()
    listener.waitForNewFrame(frames)

    color = frames[FrameType.Color]
    ir = frames[FrameType.Ir]
    depth = frames[FrameType.Depth]

    # IR/depth frames carry no exposure metadata; color frames do.
    for frame in [ir, depth]:
        assert frame.exposure == 0
        assert frame.gain == 0
        assert frame.gamma == 0
    for frame in [color]:
        assert frame.exposure > 0
        assert frame.gain > 0
        assert frame.gamma > 0

    registration.apply(color, depth, undistorted, registered)

    # with optional parameters
    registration.apply(color, depth, undistorted, registered,
                       bigdepth=bigdepth,
                       color_depth_map=color_depth_map.ravel())

    assert color.width == 1920
    assert color.height == 1080
    assert color.bytes_per_pixel == 4

    assert ir.width == 512
    assert ir.height == 424
    assert ir.bytes_per_pixel == 4

    assert depth.width == 512
    assert depth.height == 424
    assert depth.bytes_per_pixel == 4

    assert color.asarray().shape == (color.height, color.width, 4)
    assert ir.asarray().shape == (ir.height, ir.width)
    assert depth.asarray(np.float32).shape == (depth.height, depth.width)

    listener.release(frames)

    def __test_cannot_determine_type_of_frame(frame):
        frame.asarray()

    # Frames created without a FrameType cannot infer a dtype (nose-style yield test).
    for frame in [registered, undistorted]:
        yield raises(ValueError)(__test_cannot_determine_type_of_frame), frame

    device.stop()
    device.close()
class KinectV2:
    """
    Control class for the KinectV2, backed by the official Microsoft SDK on
    Windows (PyKinectV2) or by pylibfreenect2 / freenect2 on Linux.

    Initializes the Kinect and provides methods that return the scanned
    depth / IR / color images as numpy arrays.
    """

    def __init__(self):
        # hard coded class attributes for KinectV2's native resolution
        self.name = 'kinect_v2'
        self.depth_width = 512
        self.depth_height = 424
        self.color_width = 1920
        self.color_height = 1080

        self.depth = None
        self.color = None
        self._init_device()

        #self.depth = self.get_frame()
        #self.color = self.get_color()
        print("KinectV2 initialized.")

    def _init_device(self):
        """Open the platform-specific backend and stash it on self.device."""
        if _platform == 'Windows':
            # FIX: was a local `device`, but get_frame()/get_ir_frame_raw()/
            # get_color() all read self.device — the Windows path could never work.
            self.device = PyKinectRuntime.PyKinectRuntime(
                PyKinectV2.FrameSourceTypes_Color |
                PyKinectV2.FrameSourceTypes_Depth |
                PyKinectV2.FrameSourceTypes_Infrared)
        elif _platform == 'Linux':
            if _pylib:
                # pylibfreenect2 backend
                # self.device = Device()
                fn = Freenect2()
                num_devices = fn.enumerateDevices()
                assert num_devices > 0
                serial = fn.getDeviceSerialNumber(0)
                self.device = fn.openDevice(serial, pipeline=pipeline)
                self.listener = SyncMultiFrameListener(
                    FrameType.Color | FrameType.Ir | FrameType.Depth)
                # Register listeners
                self.device.setColorFrameListener(self.listener)
                self.device.setIrAndDepthFrameListener(self.listener)
                self.device.start()
                self.registration = Registration(self.device.getIrCameraParams(),
                                                 self.device.getColorCameraParams())
                self.undistorted = Frame(512, 424, 4)
                self.registered = Frame(512, 424, 4)
                # Pull one frame to confirm streaming works, then park the device.
                frames = self.listener.waitForNewFrame()
                self.listener.release(frames)
                self.device.stop()
                self.device.close()
            elif _lib:
                # freenect2 backend
                try:
                    self.device = Device()
                except Exception:  # narrowed from bare except
                    traceback.print_exc()
        else:
            print(_platform)
            raise NotImplementedError

    def _get_linux_frame(self, typ: str = 'all'):
        """Fetch one frame set via pylibfreenect2 and return numpy copies.

        Args:
            typ: 'depth', 'ir', 'color' or 'all'.

        Returns:
            The requested array, or (depth, color, ir) for 'all'.

        Raises:
            FileNotFoundError: if no frame arrived within the timeout.
        """
        frames = self.listener.waitForNewFrame(milliseconds=1000)
        if frames:
            # FIX: copy before release — asarray() is a view into a buffer that
            # listener.release() frees, so the old code returned dangling arrays.
            if typ == 'depth':
                out = frames[FrameType.Depth].asarray().copy()
                self.listener.release(frames)
                return out
            elif typ == 'ir':
                out = frames[FrameType.Ir].asarray().copy()
                self.listener.release(frames)
                return out
            if typ == 'color':
                out = frames[FrameType.Color].asarray().copy()
                self.listener.release(frames)
                return out
            if typ == 'all':
                color = frames[FrameType.Color]
                ir = frames[FrameType.Ir]
                depth = frames[FrameType.Depth]
                self.registration.apply(color, depth,
                                        self.undistorted, self.registered)
                self.registration.undistortDepth(depth, self.undistorted)
                out = (depth.asarray().copy(),
                       color.asarray().copy(),
                       ir.asarray().copy())
                self.listener.release(frames)
                return out
        else:
            raise FileNotFoundError

    def get_linux_frame(self, typ: str = 'all'):
        """Fetch one frame set via the `freenect2` backend.

        Args:
            typ: 'depth', 'ir', 'color' or anything else for (color, depth, ir).
        """
        frames = {}
        with self.device.running():
            for type_, frame in self.device:
                frames[type_] = frame
                if (FrameType.Color in frames
                        and FrameType.Depth in frames
                        and FrameType.Ir in frames):
                    break
        if typ == 'depth':
            depth = frames[FrameType.Depth].to_array()
            return depth
        elif typ == 'ir':
            ir = frames[FrameType.Ir].to_array()
            return ir
        elif typ == 'color':
            color = frames[FrameType.Color].to_array()
            # Drop the 4th (padding) channel and reorder BGRX -> RGB.
            resolution_camera = self.color_height * self.color_width  # resolution camera Kinect V2
            palette = numpy.reshape(color, (resolution_camera, 4))[:, [2, 1, 0]]
            position_palette = numpy.reshape(numpy.arange(0, len(palette), 1),
                                             (self.color_height, self.color_width))
            color = palette[position_palette]
            return color
        else:
            color = frames[FrameType.Color].to_array()
            resolution_camera = self.color_height * self.color_width  # resolution camera Kinect V2
            palette = numpy.reshape(color, (resolution_camera, 4))[:, [2, 1, 0]]
            position_palette = numpy.reshape(numpy.arange(0, len(palette), 1),
                                             (self.color_height, self.color_width))
            color = palette[position_palette]
            depth = frames[FrameType.Depth].to_array()
            ir = frames[FrameType.Ir].to_array()
            return color, depth, ir

    def get_frame(self):
        """
        Returns:
            2D array of shape (424, 512) containing the depth information of
            the latest frame in mm.
        """
        if _platform == 'Windows':
            depth_flattened = self.device.get_last_depth_frame()
            # reshape the array to 2D with native resolution of the kinectV2
            self.depth = depth_flattened.reshape(
                (self.depth_height, self.depth_width))
        elif _platform == 'Linux':
            self.depth = self.get_linux_frame(typ='depth')
        return self.depth

    def get_ir_frame_raw(self):
        """
        Returns:
            2D array of shape (424, 512) containing the raw infrared intensity
            (uint16) of the latest frame.
        """
        if _platform == 'Windows':
            ir_flattened = self.device.get_last_infrared_frame()
            # reshape the array to 2D with native resolution of the kinectV2
            self.ir_frame_raw = numpy.flipud(
                ir_flattened.reshape((self.depth_height, self.depth_width)))
        elif _platform == 'Linux':
            self.ir_frame_raw = self.get_linux_frame(typ='ir')
        return self.ir_frame_raw

    def get_ir_frame(self, min=0, max=6000):
        """
        Args:
            min: minimum intensity value mapped to uint8 (will become 0), default 0
            max: maximum intensity value mapped to uint8 (will become 255), default 6000
        Returns:
            2D array of shape (424, 512) with the infrared intensity between
            min and max mapped to uint8.
        """
        ir_frame_raw = self.get_ir_frame_raw()
        self.ir_frame = numpy.interp(ir_frame_raw, (min, max), (0, 255)).astype('uint8')
        return self.ir_frame

    def get_color(self):
        """
        Returns:
            RGB color image of shape (1080, 1920, 3) from the latest frame.
        """
        if _platform == 'Windows':
            color_flattened = self.device.get_last_color_frame()
            resolution_camera = self.color_height * self.color_width  # resolution camera Kinect V2
            # Palette of colors in RGB / Cut of 4th column marked as intensity
            palette = numpy.reshape(numpy.array([color_flattened]),
                                    (resolution_camera, 4))[:, [2, 1, 0]]
            position_palette = numpy.reshape(numpy.arange(0, len(palette), 1),
                                             (self.color_height, self.color_width))
            self.color = numpy.flipud(palette[position_palette])
        elif _platform == 'Linux':
            self.color = self.get_linux_frame(typ='color')
        return self.color