def main():
    """Entry point: load settings, initialize OpenNI2, record, then unload."""
    print("pyOniRecorder by Rocco Pietrini v1 \n ")
    readSettings()
    # Resolve the platform-specific OpenNI2 runtime directory relative to this file.
    try:
        libpath = "lib/Windows" if sys.platform == "win32" else "lib/Linux"
        print("library path is: ", os.path.join(os.path.dirname(__file__), libpath))
        openni2.initialize(os.path.join(os.path.dirname(__file__), libpath))
        print("OpenNI2 initialized \n")
    except Exception as ex:
        print("ERROR OpenNI2 not initialized", ex, " check library path..\n")
        return
    try:
        dev = openni2.Device.open_any()
    except Exception as ex:
        print("ERROR Unable to open the device: ", ex, " device disconnected? \n")
        return
    write_files(dev)
    # Best-effort teardown: a failed unload is reported but not fatal.
    try:
        openni2.unload()
        print("Device unloaded \n")
    except Exception as ex:
        print("Device not unloaded: ", ex, "\n")
def __init__(self, device, color_stream_req=True, depth_stream_req=True):
    """Open an OpenNI2 device and set default stream configuration.

    NOTE(review): the `device` argument is accepted but never used — the
    first available device is always opened via open_any(); confirm intent.
    """
    # self.openni_path is expected to exist on the class — TODO confirm.
    openni2.initialize(self.openni_path)
    self.device = openni2.Device.open_any()
    # Which streams the caller asked for.
    self.color_stream_req = color_stream_req
    self.depth_stream_req = depth_stream_req
    # Default video modes: VGA at 30 fps for both sensors.
    self.color_width, self.color_height, self.color_fps = 640, 480, 30
    self.depth_width, self.depth_height, self.depth_fps = 640, 480, 30
    self.use_depth_registration = True
    self.mirror = False
    # Populated later, when streams are created and frames are read.
    self.color_stream = None
    self.depth_stream = None
    self.color_frame = None
    self.depth_frame = None
    self.timestamp_color = None
    self.timestamp_depth = None
def init_camera(self):
    """Initialize OpenNI2/NiTE2, open a device, and start the user tracker."""
    openni2.initialize()
    nite2.initialize()
    self.dev = openni2.Device.open_any()
    dev_name = self.dev.get_device_info().name.decode('UTF-8')
    print("Device Name: {}".format(dev_name))
    # A Kinect uses a different capture resolution than other sensors.
    self.use_kinect = dev_name == 'Kinect'
    if self.use_kinect:
        print('using Kinect.')
    try:
        self.user_tracker = nite2.UserTracker(self.dev)
    except utils.NiteError:
        print("Unable to start the NiTE human tracker. Check "
              "the error messages in the console. Model data "
              "(s.dat, h.dat...) might be inaccessible.")
        sys.exit(-1)
    if self.use_kinect:
        (self.img_w, self.img_h) = CAPTURE_SIZE_KINECT
    else:
        (self.img_w, self.img_h) = CAPTURE_SIZE_OTHERS
    # Display window: fixed width, height scaled to preserve aspect ratio.
    self.win_w = 256
    self.win_h = int(self.img_h * self.win_w / self.img_w)
def __init__(self):
    """Open the depth sensor, start streaming, and load the dlib face/landmark models."""
    # --- Register the device ---
    openni2.initialize()
    self.dev = openni2.Device.open_any()
    # --- Create the depth stream (the color stream is currently disabled) ---
    self.depth_stream = self.dev.create_depth_stream()
    self.dev.set_depth_color_sync_enabled(True)  # synchronize the streams
    self.dev.set_image_registration_mode(
        openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)
    # Bug fix: set_video_mode() returns None; the original stored that None
    # in a misspelled attribute `depth_stram`. Call it for its side effect.
    # NOTE(review): resolutionY=400 looks unusual (a commented-out variant
    # used 480) — confirm against the sensor's supported modes.
    self.depth_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
            resolutionX=640,
            resolutionY=400,
            fps=30))
    # --- Start the stream ---
    self.depth_stream.start()
    # --- Mouth / face landmark detection ---
    # Bug fix: the original assigned self.predictor to the path string and
    # immediately overwrote it with the shape_predictor; keep the path local.
    predictor_path = '/home/odroid/feeding20190221_Hui/shape_predictor_68_face_landmarks.dat'
    self.detector = dlib.get_frontal_face_detector()  # frontal-face detector
    self.predictor = dlib.shape_predictor(predictor_path)  # 68-point landmark model
    self.mouth_status = 0
def openni_init(path="."):
    """Initialize OpenNI2, preferring the Redist subdirectory of `path`.

    Returns True once OpenNI2 is (or already was) initialized, False if
    both the explicit-path and the fallback initialization attempts fail.
    """
    if path is None:
        path = "."
    # Point at the platform's Redist folder unless the caller already did.
    if path and "Redist" not in path:
        if "linux" in sys.platform:
            path = path.rstrip('/') + "/Redist"
        elif "win32" in sys.platform:
            path = path.rstrip('\\') + "\\Redist"
    try:
        if not openni2.is_initialized():
            logger.info("OpenNi2 is not Initialized! Initializing.")
            openni2.initialize(path)
        return True
    except Exception as e:
        logger.error(e)
        logger.warning("Openni path is: " + path)
    # Fall back to letting openni2 find its own runtime (env variables).
    try:
        logger.warning("Resorting to standard openni2 initialization")
        openni2.initialize()
        return True
    except Exception as e:
        logger.fatal(e)
        return False
def __init__(self, cam_id="rtsp://admin:@192.168.52.52/h265/main/av_stream"): self._cam_id = 0 # cam_id self._fps = 30 try: self._cap = cv2.VideoCapture(self._cam_id) self._cap.set(cv2.CAP_PROP_FPS, self._fps) self._height = self._cap.get(cv2.CAP_PROP_FRAME_HEIGHT) self._width = self._cap.get(cv2.CAP_PROP_FRAME_WIDTH) self._cap.set(cv2.CAP_PROP_BRIGHTNESS, 0) print self._cap.get(cv2.CAP_PROP_BRIGHTNESS) print self._cap.set(cv2.CAP_PROP_BACKLIGHT, -64) openni2.initialize() self.device = openni2.Device.open_any() print 'initialized' self.depth_stream = self.device.create_depth_stream() self.depth_stream.set_mirroring_enabled(False) self.corlor_stream = self.device.create_color_stream() self.corlor_stream.set_mirroring_enabled(False) self.device.set_depth_color_sync_enabled(True) self.depth_stream.start() self.corlor_stream.start() self.depth_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM, resolutionX=640, resolutionY=480, fps=self._fps)) # self.corlor_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM, resolutionX=640, resolutionY=480, fps=1)) except Exception as e: print e
def __init__(self):
    """Open the depth device and prepare stream/frame/ROI placeholders."""
    # Initialize the depth device from a hard-coded OpenNI Redist directory.
    openni2.initialize(
        dll_directories="/home/minhdq99hp/OpenNi/OpenNI-Linux-x64-2.3/Redist/")
    self.dev = openni2.Device.open_any()
    # Streams and latest frames (created and filled elsewhere).
    self.depth_stream = None
    self.color_stream = None
    self.depth_frame = None
    self.color_frame = None
    # Regions of interest for the left / right / main areas.
    self.depth_left_roi = None
    self.color_left_roi = None
    self.depth_right_roi = None
    self.color_right_roi = None
    self.depth_main_roi = None
    self.color_main_roi = None
    # Settings
    self.noMirror = True
    self.cropEnable = True
    self.cropX = 46
    self.cropY = 27
    self.cropWidth = 550
    self.cropHeight = 420
def __init__(self):
    """Start the device's color stream and load the face-detection cascade."""
    openni2.initialize()  # can also accept the path of the OpenNI redistribution
    self.dev = openni2.Device.open_any()
    print(self.dev.get_device_info())
    # The depth stream is currently disabled; only color is used.
    self.color_stream = self.dev.create_color_stream()
    self.color_stream.start()
    # Load the Haar cascade classifier data for frontal faces.
    self.cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
def startApp(): openni2.initialize( ) # can also accept the path of the OpenNI redistribution dev = openni2.Device.open_any() print(dev.get_device_info()) depth_stream = dev.create_depth_stream() depth_stream.start() punkty = [] def getFrame(readFrame): frame_data = readFrame.get_buffer_as_uint16() img = np.frombuffer(frame_data, dtype=np.uint16) img.shape = (480, 640) return img while (True): img = getFrame(depth_stream.read_frame()) img = np.ma.masked_equal(img, 0.0, copy=True) #moze nie bedzie potrzebny indexClosestToCamera = img.argmin() j, i = np.unravel_index(indexClosestToCamera, img.shape) point = (i, j) dlX = 350 dlY = 300 xStart = 120 yStart = 120 czyWGranicachPionowych = lambda p: xStart <= p[0] < (xStart + dlX) czyWGranicachPoziomych = lambda p: yStart <= p[1] < (yStart + dlY) if czyWGranicachPionowych(point) and czyWGranicachPoziomych(point): pixelValueNearestToCamera = img.min() print( str(j) + " " + str(i) + "->" + str(pixelValueNearestToCamera)) # if 1700 > pixelValueNearestToCamera > 1200: cv2.circle(img, (i, j), 30, (0, 0, 0), 5) punkty.append((i, j)) if pixelValueNearestToCamera > 1400 and len(punkty) > 30: result = loadArtificialNeuralNetwork(punkty) print(result) break # cv2.line(img,(xStart, yStart), (xStart+dlX, yStart), (0,0,0), 5) # cv2.line(img,(xStart+dlX, yStart), (xStart+dlX, yStart+dlY), (0,0,0), 5) # cv2.line(img,(xStart+dlX, yStart+dlY), (xStart, yStart+dlY), (0,0,0), 5) # cv2.line(img,(xStart, yStart+dlY), (xStart, yStart), (0,0,0), 5) cv2.imshow("Malgorzata Niewiadomska Inzynieria Biomedyczna", img) if cv2.waitKey(1) & 0xFF == ord('z'): break depth_stream.stop() openni2.unload() print(punkty)
def __init__(self, player_type: int) -> None:
    """Types: SENSOR_{COLOR|DEPTH}."""
    # Which sensor this player replays: openni2.SENSOR_COLOR or SENSOR_DEPTH.
    self.player_type: int = player_type
    self._name: str = ''      # recording/source name, set later
    self._position: int = -1  # current frame index; -1 = not started
    # NOTE(review): annotated as VideoStream but initialized to None —
    # Optional[openni2.VideoStream] would be more accurate.
    self._stream: openni2.VideoStream = None
    self._frames: List[openni2.VideoFrame] = []
    openni2.initialize()
def __init__(self, use_thread=False, sleep_dt=0., verbose=False):
    """
    Initialize the Kinect input interface using the `openni` library.

    Args:
        use_thread (bool): If True, it will run the interface in a separate thread than the main one.
            The interface will update its data automatically.
        sleep_dt (float): If :attr:`use_thread` is True, it will sleep the specified amount before
            acquiring the next sample.
        verbose (bool): If True, it will print information about the state of the interface. This is let
            to the programmer what he / she wishes to print.

    Raises:
        ValueError: If no Microsoft Kinect device is connected.
    """
    # initialize openni2; you can give the path to the library as an argument. Otherwise, it will look for
    # OPENNI2_REDIST and OPENNI2_REDIST64 environment variables.
    openni2.initialize()

    # open all the devices
    devices = openni2.Device.open_all()

    # get the correct device (Microsoft Kinect)
    self.device = None
    for device in devices:
        info = device.get_device_info()
        if info.vendor == 'Microsoft' and info.name == 'Kinect':  # Kinect Interface
            self.device = device
            break

    # If we didn't find it, raise an error.
    # Bug fix: the message previously said "No Asus devices were detected",
    # but this interface searches for a Microsoft Kinect.
    if self.device is None:
        devices = [device.get_device_info() for device in devices]
        raise ValueError(
            "No Microsoft Kinect devices were detected; we found these devices instead: {}"
            .format(devices))

    if verbose:
        print(self.device.get_device_info())

    # create RGB and depth streams
    self.rgb_stream = self.device.create_color_stream()
    self.depth_stream = self.device.create_depth_stream()

    # start the streams
    self.rgb_stream.start()
    self.depth_stream.start()

    # data
    self.rgb = None
    self.depth = None

    super(OpenNIKinectInterface, self).__init__(use_thread=use_thread,
                                                sleep_dt=sleep_dt,
                                                verbose=verbose)
def __enter__(self):
    """Context-manager entry: open the device, start both streams, align them."""
    openni2.initialize("/home/sam/OpenNI-Linux-x64-2.3/Redist/")
    # Passing None opens the first available device.
    self.dev = openni2.Device(None, )  # same as Device.open_any()
    self._start_depth_stream()
    self._start_rgb_stream()
    # Docs at https://s3.amazonaws.com/com.occipital.openni/OpenNI_Programmers_Guide.pdf
    # Align depth to the color camera and synchronize frame timestamps.
    # NOTE(review): set_image_registration_mode expects a registration-mode
    # constant; True (== 1) happens to match DEPTH_TO_COLOR — confirm.
    self.dev.set_image_registration_mode(True)
    self.dev.set_depth_color_sync_enabled(True)
    return self
def initialize(dll_directories=_default_dll_directories):
    """Load and initialize the NiTE2 native library (idempotent).

    Ensures openni2 is initialized first, then searches each candidate
    directory for the NiTE2 DLL, chdir-ing into it while loading (NiTE2
    resolves its model/data files relative to the current directory).

    Raises:
        InitializationError: if the DLL could not be loaded from any
            candidate directory.
    """
    global _nite2_initialized
    global loaded_dll_directory
    # Already fully initialized: nothing to do.
    if _nite2_initialized:
        return
    # Accept a single directory as well as a list of directories.
    if isinstance(dll_directories, str):
        dll_directories = [dll_directories]
    if not openni2.is_initialized():
        openni2.initialize()
    # DLL already loaded earlier: just (re-)initialize NiTE.
    if loaded_dll_directory:
        c_api.niteInitialize()
        _nite2_initialized = True
        return
    found = False
    prev = os.getcwd()
    exceptions = []  # (path, reason) pairs for the failure report
    dll_directories = [
        os.path.normpath(os.path.abspath(d)) for d in dll_directories
    ]
    for dlldir in dll_directories:
        if not os.path.isdir(dlldir):
            exceptions.append((dlldir, "Directory does not exist"))
            continue
        fullpath = os.path.join(dlldir, _dll_name)
        if not os.path.isfile(fullpath):
            exceptions.append((fullpath, "file does not exist"))
            continue
        try:
            # chdir so NiTE2 can find its data files next to the DLL.
            os.chdir(dlldir)
            c_api.load_dll(fullpath)
            c_api.niteInitialize()
        except Exception as ex:
            exceptions.append((fullpath, ex))
        else:
            found = True
            loaded_dll_directory = dlldir
            break
    # Restore the caller's working directory regardless of outcome.
    os.chdir(prev)
    if not found:
        raise InitializationError(
            "NiTE2 could not be loaded:\n %s" %
            ("\n ".join("%s: %s" % (dir, ex) for dir, ex in exceptions)),
        )
    _nite2_initialized = True
def _ready(self):
    """Ready hook: start the depth stream and bind its frames to a texture."""
    openni2.initialize()
    dev = openni2.Device.open_any()
    print(dev.get_device_info())
    print(dev.get_sensor_info(openni2.SENSOR_DEPTH).videoModes)
    depth_stream = dev.create_depth_stream()
    # VGA depth at 30 fps in 1 mm units.
    depth_stream.configure_mode(640, 480, 30, openni2.PIXEL_FORMAT_DEPTH_1_MM)
    # Deliver every new frame to self.frame_new as it arrives.
    depth_stream.register_new_frame_listener(self.frame_new)
    depth_stream.start()
    # 8-bit luminance texture updated as a video surface.
    self.depth_texture.create(640, 480, Image.FORMAT_L8,
                              Texture.FLAG_VIDEO_SURFACE)
def open_astra_device(cfg, image_size):
    """Open an Astra device and build its IR, depth, and color image sources.

    Returns:
        (ir_source, depth_source, color_source) tuple.
    """
    openni2.initialize(cfg["openniRedist"])
    device = openni2.Device.open_any()
    print(device.get_device_info())
    fps = cfg["cameras"]["fps"]
    ir = ImageSourceInfrared(device, fps, image_size)
    depth = ImageSourceDepth(device, fps, image_size, align_depth_to_color=True)
    color = ImageSourceCv(cfg["cameras"]["depthCameraColor"], fps)
    return ir, depth, color
def __init__(self):
    """Build the UI, wire up the controls, and initialize the OpenNI driver."""
    super().__init__()
    self.setupUi(self)
    self.oni_video: VideoOni = None  # currently opened .oni video, if any
    # Wire UI controls to their handlers.
    self.open_btn.clicked.connect(self.open_file)
    self.play_btn.clicked.connect(self.play_pause)
    self.slider.sliderMoved.connect(self.set_position)
    self.check_box.clicked.connect(self.set_sensor)
    self.back_frame.clicked.connect(self.skip_back)
    self.forward_frame.clicked.connect(self.skip_forward)
    # Driver initialization
    openni2.initialize()
    self.show()
def __init__(self, debug=False):
    """Open the first OpenNI2 device and configure aligned depth + RGB streams."""
    self.debug = debug
    self._scale_depth = True
    self._scale_rgb_colors = False
    openni2.initialize("/usr/lib")
    dev = openni2.Device.open_any()
    if debug:
        print(dev.get_device_info())
    # Register depth to the color image and synchronize frame timestamps.
    dev.set_image_registration_mode(True)
    dev.set_depth_color_sync_enabled(True)
    # --- depth stream ---
    self.depth_stream = dev.create_depth_stream()
    depth_mode = c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
        resolutionX=Depth_ResX,
        resolutionY=Depth_ResY,
        fps=Depth_fps,
    )
    self.depth_stream.set_video_mode(depth_mode)
    depth_sensor_info = self.depth_stream.get_sensor_info()
    self.max_depth = self.depth_stream.get_max_pixel_value()
    self.min_depth = 0
    if self.debug:
        for mode in depth_sensor_info.videoModes:
            print(mode)
        print("Min depth value: {}".format(self.min_depth))
        print("Max depth value: {}".format(self.max_depth))
    # --- RGB stream ---
    self.rgb_stream = dev.create_color_stream()
    rgb_mode = c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
        resolutionX=RGB_ResX,
        resolutionY=RGB_ResY,
        fps=RGB_fps,
    )
    self.rgb_stream.set_video_mode(rgb_mode)
    if self.debug:
        for mode in self.rgb_stream.get_sensor_info().videoModes:
            print(mode)
def openDevice(video_path):
    """Open a recorded .oni file for looping playback.

    Args:
        video_path: path of the .oni recording (as accepted by
            openni2.Device.open_file).

    Returns:
        (device, playback_support) tuple with repeat enabled.

    Raises:
        Exception: "Initialization Error" if OpenNI2 cannot be loaded or
            the file cannot be opened (original cause is chained).
    """
    try:
        if sys.platform == "win32":
            libpath = "lib/Windows"
        else:
            libpath = "lib/Linux"
        # NOTE(review): libpath is resolved relative to the current working
        # directory; other loaders in this project anchor it to __file__.
        openni2.initialize(libpath)
        dev = openni2.Device.open_file(video_path)
        pbs = openni2.PlaybackSupport(dev)
        pbs.set_repeat_enabled(True)
        # presumably -1.0 means frames advance only on demand — confirm
        pbs.set_speed(-1.0)
        return dev, pbs
    except Exception as ex:
        print(ex)
        # Bug fix: chain the original exception instead of discarding the
        # traceback, so callers can see why initialization failed.
        raise Exception("Initialization Error") from ex
def initialize(dll_directories=_default_dll_directories):
    """Load and initialize the NiTE2 native library (idempotent).

    Ensures openni2 is initialized first, then searches each candidate
    directory for the NiTE2 DLL, chdir-ing into it while loading (NiTE2
    resolves its model/data files relative to the current directory).

    Raises:
        InitializationError: if the DLL could not be loaded from any
            candidate directory.
    """
    global _nite2_initialized
    global loaded_dll_directory
    # Already fully initialized: nothing to do.
    if _nite2_initialized:
        return
    # Accept a single directory as well as a list of directories.
    if isinstance(dll_directories, str):
        dll_directories = [dll_directories]
    if not openni2.is_initialized():
        openni2.initialize()
    # DLL already loaded earlier: just (re-)initialize NiTE.
    if loaded_dll_directory:
        c_api.niteInitialize()
        _nite2_initialized = True
        return
    found = False
    prev = os.getcwd()
    exceptions = []  # (path, reason) pairs for the failure report
    dll_directories = [os.path.normpath(os.path.abspath(d)) for d in dll_directories]
    for dlldir in dll_directories:
        if not os.path.isdir(dlldir):
            exceptions.append((dlldir, "Directory does not exist"))
            continue
        fullpath = os.path.join(dlldir, _dll_name)
        if not os.path.isfile(fullpath):
            exceptions.append((fullpath, "file does not exist"))
            continue
        try:
            # chdir so NiTE2 can find its data files next to the DLL.
            os.chdir(dlldir)
            c_api.load_dll(fullpath)
            c_api.niteInitialize()
        except Exception as ex:
            exceptions.append((fullpath, ex))
        else:
            found = True
            loaded_dll_directory = dlldir
            break
    # Restore the caller's working directory regardless of outcome.
    os.chdir(prev)
    if not found:
        raise InitializationError("NiTE2 could not be loaded:\n %s" % ("\n ".join("%s: %s" % (dir, ex) for dir, ex in exceptions)),)
    _nite2_initialized = True
def getVideo(file_name):
    """Read all color and depth frames from a recorded .oni file.

    Args:
        file_name: path to the recording, as accepted by OP.Device.

    Returns:
        (framesColor, framesDepth): parallel lists of frames, one pair
        per color frame in the recording.
    """
    OP.initialize()
    framesColor = []
    framesDepth = []
    # Renamed from `file` to avoid shadowing the builtin.
    device = OP.Device(file_name)
    # Bug fix: the original accessed `set_depth_color_sync_enabled` without
    # calling it, a no-op; actually enable depth/color synchronization.
    device.set_depth_color_sync_enabled(True)
    cStream = OP.VideoStream(device, OP.SENSOR_COLOR)
    dStream = OP.VideoStream(device, OP.SENSOR_DEPTH)
    cStream.start()
    dStream.start()
    # Read matching color/depth frames pair by pair.
    for _ in range(cStream.get_number_of_frames()):
        framesColor.append(cStream.read_frame())
        framesDepth.append(dStream.read_frame())
        print(framesDepth[-1]._frame)
    cStream.stop()
    dStream.stop()
    return framesColor, framesDepth
def start(self):
    """ Start the sensor """
    print('Starting the PrimeSense Sensor...')
    # Open the first available device.
    # openni2.initialize(PrimesenseSensor.OPENNI2_PATH)
    openni2.initialize()
    self._device = openni2.Device.open_any()

    # Depth stream: class-level resolution/FPS, 1 mm units.
    self._depth_stream = self._device.create_depth_stream()
    self._depth_stream.configure_mode(PrimesenseSensor.DEPTH_IM_WIDTH,
                                      PrimesenseSensor.DEPTH_IM_HEIGHT,
                                      PrimesenseSensor.FPS,
                                      openni2.PIXEL_FORMAT_DEPTH_1_MM)
    self._depth_stream.start()

    # Color stream: RGB888 with the configured white balance / exposure.
    self._color_stream = self._device.create_color_stream()
    self._color_stream.configure_mode(PrimesenseSensor.COLOR_IM_WIDTH,
                                      PrimesenseSensor.COLOR_IM_HEIGHT,
                                      PrimesenseSensor.FPS,
                                      openni2.PIXEL_FORMAT_RGB888)
    self._color_stream.camera.set_auto_white_balance(self._auto_white_balance)
    self._color_stream.camera.set_auto_exposure(self._auto_exposure)
    self._color_stream.start()

    # Device-level configuration: registration mode and depth/color sync.
    if self._registration_mode == PrimesenseRegistrationMode.DEPTH_TO_COLOR:
        registration = openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR
    else:
        registration = openni2.IMAGE_REGISTRATION_OFF
    self._device.set_image_registration_mode(registration)
    self._device.set_depth_color_sync_enabled(self._enable_depth_color_sync)

    self._running = True
    print('Finish Starting the Sensor!')
def start(self, device_serial=None, size=(1280, 720)):
    """Open the first device and start its color and depth streams.

    NOTE(review): `device_serial` and `size` are currently unused.
    """
    openni2.initialize()
    self.dev = openni2.Device.open_any()
    # Color: pick the first RGB888 mode the sensor advertises.
    if self.dev.has_sensor(openni2.SENSOR_COLOR):
        self.stream = self.dev.create_stream(openni2.SENSOR_COLOR)
        for mode in self.stream.get_sensor_info().videoModes:
            if mode.pixelFormat == _openni2.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888:
                self.stream.set_video_mode(mode)
                break
        self.stream.start()
    # Depth: use the last advertised video mode.
    if self.dev.has_sensor(openni2.SENSOR_DEPTH):
        self.stream1 = self.dev.create_stream(openni2.SENSOR_DEPTH)
        modes = self.stream1.get_sensor_info().videoModes
        self.stream1.set_video_mode(modes[len(modes) - 1])
        self.stream1.start()
def __init__(self, fps_window=30):
    """Open the depth camera and start streaming depth frames.

    Args:
        fps_window: number of frames over which FPS is averaged.
    """
    # Camera parameters
    self.max_range = 4095
    self.height = 60
    self.width = 80
    self.index = 0

    # FPS estimators for the depth and IR streams.
    self.depth_fps_estimator = FpsEstimator(30.0, fps_window,
                                            "[Driver] Depth stream")
    self.ir_fps_estimator = FpsEstimator(30.0, fps_window,
                                         "[Driver] IR stream")

    # Connect and open the device; Windows needs an explicit Redist path.
    if platform.system() == "Windows":
        initialize("C:/Program Files/OpenNI2/Redist")  # specify path for Redist
    else:
        initialize()  # adds 45.5 MiB
    self.dev = Device.open_any()  # adds 6.7 MiB
    self.serial_number = (self.dev.get_property(
        ONI_DEVICE_PROPERTY_SERIAL_NUMBER, (c_char * 100)).value).decode('utf-8')

    # IR streaming (currently disabled):
    # self.ir_frame = np.zeros((self.height, self.width))
    # self.ir_stream = self.dev.create_ir_stream()
    # self.ir_frame_ready = Event()
    # self.ir_stream.register_new_frame_listener(self.ir_frame_callback)
    # self.ir_stream.start()

    # Depth streaming: frames arrive through the listener callback.
    self.depth_frame = zeros((self.height, self.width))
    self.depth_stream = self.dev.create_depth_stream()
    self.depth_frame_ready = Event()
    self.depth_stream.register_new_frame_listener(self.depth_frame_callback)
    self.depth_stream.start()
def stream(self):
    """Return the next frame's data from the opened .oni file.

    Lazily opens the file and the requested stream when `self.file` is
    set, then seeks to the current frame counter, decodes that frame and
    returns its data. `self.counter` persists between calls, so each call
    yields the next frame; returns None once the recording is exhausted
    or the thread is stopped.
    """
    if self.file:
        openni2.initialize(self.PATH_TO_OpenNI_LIB)
        dev = openni2.Device.open_file(self.file.encode('utf-8'))
        self.ps = PlaybackSupport(dev)
        if self.stream_type == self.COLOR_STREAM:
            self.current_stream = dev.create_color_stream()
        else:
            self.current_stream = dev.create_depth_stream()
        self.number_of_frames = self.current_stream.get_number_of_frames()
        # The color stream reports one frame more than is readable.
        if self.stream_type == self.COLOR_STREAM:
            self.number_of_frames -= 1
        self.current_stream.start()
    # Loop
    # Bug fix: the guard used bitwise `&` between booleans; `and` is the
    # correct, short-circuiting operator here.
    while self.is_running and (self.counter <= self.number_of_frames) and self.isRunning():
        # Crash here on big file without this
        try:
            self.ps.seek(self.current_stream, self.counter)
        except Exception as e:
            print(e)
        # Put the depth frame into a numpy array and reshape it
        frame = self.current_stream.read_frame()
        if self.stream_type == self.COLOR_STREAM:
            self.ps.set_speed(self.DEFAULT_SPEED)
            frame_data = self.color_stream(frame)
        else:
            self.ps.set_speed(self.DEPTH_SPEED)
            frame_data = self.depth_stream(frame)
        self.counter += 1
        return frame_data
def __init__(self, depth_shape=(480, 640), file_name=""):
    """OPENNI Camera rtl test and device definition

    Args:
        depth_shape (tuple, optional): frame shape supported by camera default. Defaults to (480, 640).
        file_name (str, optional): use file_name="xxxxx" to enable recorded file. Defaults to "".
    """
    try:
        self.depth_shape = depth_shape
        from openni import openni2
        try:
            openni2.initialize()
            if file_name == "":
                self.device = openni2.Device.open_any()
            else:
                # Bug fix: the original called bin(file_name), which raises
                # TypeError for a str path (bin() takes an int); open_file
                # expects the path as bytes.
                self.device = openni2.Device.open_file(file_name.encode('utf-8'))
        except Exception as e:
            print(repr(e))
            print(
                "OPENNI2 Runtime Library not Found!!! Make sure your LD_LIBRARY_PATH or path-of-current containing runtime file."
            )
    except ImportError:
        print("Use `pip3 install openni` first")
def main(): cap = cv2.VideoCapture(0) (ret, color_frame) = cap.read() openni2.initialize() device = openni2.Device.open_any() device.set_depth_color_sync_enabled(True) depth_stream = device.create_depth_stream() depth_stream.start() while True: print("#################") frame = depth_stream.read_frame() ret, color_frame = cap.read() frame_data = frame.get_buffer_as_uint8() position = openni2.convert_depth_to_world(depth_stream, 100, 100, frame_data[10000]) print position cv2.imshow("image", color_frame) if cv2.waitKey(1) & 0xFF == ord('q'): cv2.destroyAllWindows() break depth_stream.stop() openni2.unload()
def getOrbbec():
    """Initialize OpenNI2 and open the first Orbbec camera.

    Returns:
        The opened openni2.Device, or None if initialization or opening
        the device fails (errors are printed).
    """
    # Load the OpenNI runtime from the platform directory next to this file.
    try:
        libpath = "lib/Windows" if sys.platform == "win32" else "lib/Linux"
        print("library path is: ",
              os.path.join(os.path.dirname(__file__), libpath))
        openni2.initialize(os.path.join(os.path.dirname(__file__), libpath))
        print("OpenNI2 initialized \n")
    except Exception as ex:
        print("ERROR OpenNI2 not initialized", ex, " check library path..\n")
        return
    # Open the Orbbec camera.
    try:
        device = openni2.Device.open_any()
        return device
    except Exception as ex:
        print("ERROR Unable to open the device: ", ex, " device disconnected? \n")
        return
def get_video(file_name, progress_bar):
    """Load all color and depth frames from a recorded .oni file.

    Depth frames are converted to contrast-stretched 8-bit arrays
    (repeated x4); color frames are kept as raw VideoFrame objects.
    `progress_bar` (a QProgressBar-like widget) is updated while loading.

    Returns:
        (frames_color, frames_depth) lists.
    """
    openni2.initialize()
    frames_color = []
    frames_depth = []
    # Renamed from `file` to avoid shadowing the builtin.
    device = openni2.Device(file_name)
    # Open color and depth streams for reading from the file.
    c_stream = openni2.VideoStream(device, openni2.SENSOR_COLOR)
    d_stream = openni2.VideoStream(device, openni2.SENSOR_DEPTH)
    c_stream.start()
    d_stream.start()
    progress_bar.setValue(1)
    progress_bar.setVisible(True)
    n_frames = d_stream.get_number_of_frames()
    # Bug fix: with fewer than 100 frames the original computed
    # per_cent == 0 and crashed with ZeroDivisionError below.
    per_cent = max(1, n_frames // 100)
    for i in range(n_frames):
        # Append each loaded depth frame to the shared list (frames_depth).
        depth_frame = d_stream.read_frame()
        # From https://stackoverflow.com/a/55539208/8245749
        depth_frame_data = depth_frame.get_buffer_as_uint16()
        depth_img = np.frombuffer(depth_frame_data, dtype=np.uint16)
        img8 = (depth_img / 256).astype(np.uint8)
        img8 = ((img8 - img8.min()) / (img8.ptp() / 255)).astype(np.uint8)
        frames_depth.append(img8.repeat(4))
        # Append each loaded color frame to the shared list (frames_color).
        color_frame = c_stream.read_frame()
        frames_color.append(color_frame)
        progress_bar.setValue(i // per_cent)
    c_stream.stop()
    d_stream.stop()
    return frames_color, frames_depth
def init_capture_device(self):
    """Initialize OpenNI2 and NiTE2, then open the first available device."""
    openni2.initialize()
    nite2.initialize()
    return openni2.Device.open_any()
def init_sensor(self):
    """Initialize OpenNI2 and return the available device(s).

    NOTE(review): Device.open_all() returns a *list* of devices despite
    the singular name used by the original — confirm callers expect that.
    """
    openni2.initialize()
    devices = openni2.Device.open_all()
    return devices
'PATH'] + ';' + dir_path + '/../../x64/Release;' + dir_path + '/../../bin;' import pyopenpose as op else: # Change these variables to point to the correct folder (Release/x64 etc.) sys.path.append('../../python') # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it. # sys.path.append('/usr/local/python') from openpose import pyopenpose as op except ImportError as e: print( 'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?' ) raise e # Drive Kinect openni2.initialize() dev = openni2.Device.open_any() print('opening kinect', dev.get_device_info()) depth_stream = dev.create_depth_stream() color_stream = dev.create_color_stream() depth_stream.start() color_stream.start() # Custom Params (refer to include/openpose/flags.hpp for more parameters) params = dict() params["model_folder"] = "../../../models/" params["hand"] = True params["disable_multi_thread"] = True params["number_people_max"] = 1 params["hand_detector"] = 0
return {name: transformation_matrix(user, joint_code) for name, joint_code in joints.items()} if __name__ == "__main__": logging.basicConfig(level=logging.INFO) import argparse parser = argparse.ArgumentParser() parser.add_argument("world", help="Underworlds world to monitor") #parser.add_argument("-d", "--debug", help="run in interactive, debug mode", action="store_true") args = parser.parse_args() ### OpenNI/NiTE initialization openni2.initialize() nite2.initialize() logger.info("Opening a freenect device...") dev = openni2.Device.open_any() info = dev.get_device_info() logger.info("Device <%s %s> successfully opened." % (info.vendor, info.name)) logger.info("Loading the NiTE user tracker...") try: userTracker = nite2.UserTracker(dev) except utils.NiteError as ne: logger.error("Unable to start the NiTE human tracker. Check " "the error messages in the console. Model data " "(s.dat, h.dat...) might be missing.") sys.exit(-1)