def write_files(dev):
    """Capture depth + color from *dev* and record them into a timestamped .oni file.

    Blocks until the user presses enter, then stops the recorder and both streams.
    """
    depth_stream = dev.create_depth_stream()
    color_stream = dev.create_color_stream()
    print(dev.get_sensor_info(openni2.SENSOR_DEPTH))
    depth_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
            resolutionX=320, resolutionY=240, fps=30))
    color_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
            resolutionX=320, resolutionY=240, fps=30))
    depth_stream.start()
    color_stream.start()
    # Use the explicit enum rather than the bare ``True`` the original passed:
    # OniImageRegistrationMode is OFF(0)/DEPTH_TO_COLOR(1), so True only worked
    # by numeric coincidence.
    dev.set_image_registration_mode(openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)
    rec = openni2.Recorder(time.strftime("%Y%m%d%H%M") + ".oni")
    rec.attach(depth_stream)
    rec.attach(color_stream)
    print(rec.start())
    input("Press enter to terminate the recording ...")
    rec.stop()
    depth_stream.stop()
    color_stream.stop()
def start(self):
    """Open the OpenNI device and start the depth and color streams.

    Setup runs only on the first call; subsequent calls just return the
    already-opened device.

    Returns:
        The opened ``openni2.Device``.
    """
    if self._started:
        return self._device

    # Device setup.
    openni2.initialize()
    self._device = openni2.Device.open_any()
    self._sensor_info = self._device.get_sensor_info(openni2.SENSOR_DEPTH)

    # Create the streams.
    self.depth_stream = self._device.create_depth_stream()
    self.color_stream = self._device.create_color_stream()

    # Configure them from the stored (x, y, fps) configs; depth uses 100 um units.
    depth_mode = c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM,
        resolutionX=self.depth_config[0],
        resolutionY=self.depth_config[1],
        fps=self.depth_config[2])
    color_mode = c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
        resolutionX=self.color_config[0],
        resolutionY=self.color_config[1],
        fps=self.color_config[2])
    self.depth_stream.set_video_mode(depth_mode)
    self.color_stream.set_video_mode(color_mode)

    # Start streaming and remember that setup is done.
    self.depth_stream.start()
    self.color_stream.start()
    self._started = True
    return self._device
def __init__(self):
    """Open the first available OpenNI device and start RGB + depth streams."""
    super(Camera, self).__init__()
    driver_dir = sys.path[1] + '/src/modules'
    openni2.initialize(driver_dir)
    print(driver_dir)
    self.dev = openni2.Device.open_any()

    # Color stream: 640x480 RGB888 @ 30 fps.
    self.rgb_stream = self.dev.create_color_stream()
    rgb_mode = c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
        resolutionX=640, resolutionY=480, fps=30)
    self.rgb_stream.set_video_mode(rgb_mode)
    self.rgb_stream.start()

    # Depth stream: 320x240, 100 um depth units @ 30 fps.
    self.depth_stream = self.dev.create_depth_stream()
    depth_mode = c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM,
        resolutionX=320, resolutionY=240, fps=30)
    self.depth_stream.set_video_mode(depth_mode)
    self.depth_stream.start()
def setup_camera(w, h, fps):
    """Initialise OpenNI, open a device, and return started (rgb, depth) streams.

    Both streams run at w x h @ fps, unmirrored, time-synchronised, with the
    depth image registered to the color image.
    """
    # Initialize OpenNI from the bundled redistributable.
    # dist = './driver/OpenNI-Linux-x64-2.3/Redist'
    dist = './driver/OpenNI-Windows-x64-2.3/Redist'
    openni2.initialize(dist)
    if openni2.is_initialized():
        print("openNI2 initialized")
    else:
        print("openNI2 not initialized")

    # Register the device.
    dev = openni2.Device.open_any()

    # Create both streams.
    rgb_stream = dev.create_color_stream()
    depth_stream = dev.create_depth_stream()

    # Configure them -- the driver may adapt the mode to the bus speed.
    rgb_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
            resolutionX=w, resolutionY=h, fps=fps))
    depth_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
            resolutionX=w, resolutionY=h, fps=fps))

    # Mirroring defaults to True; disable it on both streams.
    for stream in (depth_stream, rgb_stream):
        stream.set_mirroring_enabled(False)

    # Start the streams.
    rgb_stream.start()
    depth_stream.start()

    # Synchronize frames and align depth to the rgb image (DEPTH2RGB).
    dev.set_depth_color_sync_enabled(True)
    dev.set_image_registration_mode(openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)
    return rgb_stream, depth_stream
def start(self):
    """Open the first available depth camera and start a 320x240@30 depth stream.

    Returns:
        True once the stream is running.

    Raises:
        RuntimeError: if no depth camera can be opened (chained to the
            underlying OpenNIError; RuntimeError is a subclass of Exception,
            so existing ``except Exception`` callers still catch it).
    """
    # Register the device.
    try:
        dev = openni2.Device.open_any()
    except OpenNIError as exc:
        print("Unable to open any depth camera:", exc)
        # Chain the driver error instead of discarding it.
        raise RuntimeError("Unable to open any depth camera") from exc

    # Create the depth stream.
    self.depth_stream = dev.create_depth_stream()

    # Configure before starting; actual mode may adapt to bus speed.
    print('Get b4 video mode', self.depth_stream.get_video_mode())
    self.depth_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
            resolutionX=320, resolutionY=240, fps=30))

    # Mirroring defaults to True; disable it.
    self.depth_stream.set_mirroring_enabled(False)

    # Start the stream.
    self.depth_stream.start()
    return True
def createColor(self, x=640, y=480, fps=30):
    """Create and configure the RGB stream (x*y RGB888 @ fps), mirroring off.

    The stream is not started here.
    """
    self.rgb_stream = self.dev.create_color_stream()
    mode = c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
        resolutionX=x, resolutionY=y, fps=fps)
    self.rgb_stream.set_video_mode(mode)
    self.rgb_stream.set_mirroring_enabled(False)
    print("Intialize the Color Camera")
def openKinectStream():
    """
    it takes around 1.5s to open the depth stream so it's better to reuse an open stream
    :return: True when the depth stream is up, False otherwise
    """
    if not config.openKinectFailureLogged:
        config.log(f"try to load openni2 driver", publish=False)
    try:
        openni2.initialize("C:/Program Files (x86)/OpenNI2/Tools/")
        config.dev = openni2.Device.open_any()
        #if standAlone:
        #    print(dev.get_sensor_info(c_api.OniSensorType.ONI_SENSOR_COLOR))
        config.depth_stream = config.dev.create_depth_stream()
        if config.depth_stream is None:
            if not config.openKinectFailureLogged:
                config.log(f"could not acquire depth_stream", publish=False)
            return False
        # BUGFIX: configure the video mode *before* starting the stream;
        # OpenNI does not honour mode changes on an already-started stream.
        config.depth_stream.set_video_mode(
            c_api.OniVideoMode(
                pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM,
                resolutionX=640, resolutionY=480, fps=30))
        config.depth_stream.start()
        config.openKinectFailureLogged = False
        config.log(f"depth stream startet")
        return True
    except Exception as e:
        if not config.openKinectFailureLogged:
            config.log(f"openKinectStream failed, {e}",
                       publish=not config.openKinectFailureLogged)
            config.log(f"in case 12 V is available check for 'Kinect for Windows' in DeviceManager, should show 4 subentries",
                       publish=not config.openKinectFailureLogged)
        config.openKinectFailureLogged = True
        closeKinectStream()
        return False
def run(self):
    """Grab 320x240 depth frames (100 um units) into the shared queue until exit."""
    print("Start depth image thread")

    # Configure and start the camera depth stream.
    depth_stream = self.device.create_depth_stream()
    depth_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM,
            resolutionX=320, resolutionY=240, fps=30))
    depth_stream.start()

    # Pump frames continuously until the global exit signal is set.
    while not gs.exit_signal:
        raw = depth_stream.read_frame().get_buffer_as_uint16()
        depth = np.frombuffer(raw, dtype=np.uint16)
        depth.shape = (240, 320)
        depth = cv2.flip(depth, 1)
        put_to_queue_no_wait_no_block(depth, gs.depth_frames)

    print("Exiting from Depth image grabber")
def createDepth(self, x=640, y=480, fps=30):
    """Create and configure the depth stream (x*y, 1 mm units @ fps), mirroring off.

    The stream is not started here.
    """
    self.depth_stream = self.dev.create_depth_stream()
    mode = c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
        resolutionX=x, resolutionY=y, fps=fps)
    self.depth_stream.set_video_mode(mode)
    self.depth_stream.set_mirroring_enabled(False)
    print("Initialize the Depth Camera")
def ir_stream_init(dev):
    """Return an IR stream on *dev* configured for 640x480 GRAY16 @ 30 fps.

    The stream is configured but not started.
    """
    ir = dev.create_ir_stream()
    mode = c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_GRAY16,
        resolutionX=640,
        resolutionY=480,
        fps=30)
    ir.set_video_mode(mode)
    return ir
def initColor(self, x, y, fps):
    """Create, configure (x*y RGB888 @ fps) and start the color stream."""
    stream = self.device.create_color_stream()
    stream.set_video_mode(c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
        resolutionX=x, resolutionY=y, fps=fps))
    stream.start()
    self.rgb_stream = stream
def init_depth_stream():
    """Initialise OpenNI, open a Kinect, and start the module-level depth stream.

    Uses the module-level ``width``/``height`` globals; runs at 25 fps, 1 mm units.
    """
    global depth_stream
    openni2.initialize()  # This file should be in the same directory as OpenNI2.dll
    kinect = openni2.Device.open_any()
    depth_stream = kinect.create_depth_stream()
    mode = c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
        resolutionX=width, resolutionY=height, fps=25)
    depth_stream.set_video_mode(mode)
    depth_stream.start()
    time.sleep(1)  # Sleep and wait for the kinect to be ready
def depth_stream_init(dev):
    """Return a depth stream on *dev* configured for 640x480 @ 30 fps, not started.

    Depth is delivered in 100 um units; switch the pixelFormat to
    ONI_PIXEL_FORMAT_DEPTH_1_MM for millimetre units.
    """
    depth = dev.create_depth_stream()
    mode = c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM,
        # pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
        resolutionX=640,
        resolutionY=480,
        fps=30)
    depth.set_video_mode(mode)
    return depth
def createDepth(self):
    """Create and configure the depth stream from the stored instance config.

    Uses ``self.x`` x ``self.y`` @ ``self.fps`` in 1 mm depth units; mirroring
    follows ``self.depth_mirror``. The stream is not started here.
    """
    self.depth_stream = self.dev.create_depth_stream()
    self.depth_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
            resolutionX=self.x,
            resolutionY=self.y,
            fps=self.fps))  # BUGFIX: was ``selfself.fps`` -> NameError at runtime
    self.depth_stream.set_mirroring_enabled(self.depth_mirror)
    print("Initialize the Depth Camera")
def _rgb_stream_from_device(device):
    """Return a color stream on *device* configured for 320x240 RGB888 @ 30 fps.

    Prints the mode the stream came up with; the stream is not started.
    """
    stream = device.create_color_stream()
    print("The rgb video mode is", stream.get_video_mode())
    mode = c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
        resolutionX=320, resolutionY=240, fps=30)
    stream.set_video_mode(mode)
    return stream
def _depth_stream_from_device(device):
    """Return a depth stream on *device* at 320x240 (1 mm units) @ 30 fps, unmirrored.

    Prints the mode the stream came up with; the stream is not started.
    """
    stream = device.create_depth_stream()
    print("The depth video mode is", stream.get_video_mode())
    mode = c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
        resolutionX=320,
        resolutionY=240,
        fps=30)
    stream.set_video_mode(mode)
    stream.set_mirroring_enabled(False)
    return stream
def getDepthStream(self):
    """Create the depth stream at self.width x self.height (1 mm units), unmirrored.

    The stream is configured but not started.
    """
    stream = self.device.create_depth_stream()
    stream.set_video_mode(c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
        resolutionX=self.width, resolutionY=self.height, fps=30))
    stream.set_mirroring_enabled(False)
    self.depth_stream = stream
def __init__(self, file_pass):
    """Initialise OpenNI from *file_pass* and start 640x480 color + depth streams.

    Color is RGB888; depth is delivered in 100 um units.
    """
    openni2.initialize(file_pass)
    dev = openni2.Device.open_any()

    # BUGFIX: configure each stream *before* starting it; the original called
    # start() first, and OpenNI does not honour mode changes on a live stream.
    self.color_stream = dev.create_color_stream()
    self.color_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
            resolutionX=640, resolutionY=480, fps=30))
    self.color_stream.start()

    self.depth_stream = dev.create_depth_stream()
    self.depth_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM,
            resolutionX=640, resolutionY=480, fps=30))
    self.depth_stream.start()
def __init__(self):
    """Initialise OpenNI from the Pi redistributable and start a 320x240 depth stream."""
    path = '/home/pi/devel/OpenNI2/Packaging/OpenNI-Linux-Arm-2.2/Redist'
    openni2.initialize(path)  # can also accept the path of the OpenNI redistribution
    dev = openni2.Device.open_any()
    # CONSISTENCY FIX: was a Python-2 ``print x`` statement; the parenthesized
    # single-argument form works identically on Python 2 and 3 and matches the
    # rest of the file.
    print(dev.get_sensor_info(openni2.SENSOR_DEPTH))
    self.depth_stream = dev.create_depth_stream()
    self.depth_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
            resolutionX=320, resolutionY=240, fps=30))
    self.depth_stream.start()
def initDepth(self, x, y, fps):
    """Create, configure (x*y, 1 mm units @ fps) and start the depth stream, mirrored."""
    stream = self.device.create_depth_stream()
    stream.set_video_mode(c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
        resolutionX=x, resolutionY=y, fps=fps))
    stream.set_mirroring_enabled(True)
    stream.start()
    self.depth_stream = stream
    print("Depth Camera Initialized")
def __init__(self):
    """Open the OpenNI device, start RGB + depth streams, and prepare the
    per-run output directories and the shared RGB video writer."""
    super(ImageProcessing, self).__init__()
    # OpenNI driver files are expected under <project>/src/modules.
    openni2.initialize(sys.path[1] + '/src/modules')
    print(sys.path[1] + '/src/modules')
    self.dev = openni2.Device.open_any()
    # rgb stream: 640x480 RGB888 @ 30 fps
    self.rgb_stream = self.dev.create_color_stream()
    self.rgb_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
            resolutionX=640,
            resolutionY=480,
            fps=30))
    self.rgb_stream.start()  # start stream
    # depth stream: lower resolution than rgb (320x240), 100 um depth units
    self.depth_stream = self.dev.create_depth_stream()
    self.depth_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM,
            resolutionX=320,
            resolutionY=240,
            fps=30))
    self.depth_stream.start()  # start stream
    # output folder: one timestamped directory per run, with rgb/ and depth/ subdirs
    self.oDir = sys.path[1] + '/output/' + time.strftime(
        "%Y-%m-%d_%H-%M-%S")
    self.oDirs = {'rgb': self.oDir + '/rgb', 'depth': self.oDir + '/depth'}
    for key in self.oDirs:
        odir = self.oDirs[key]
        if not os.path.isdir(odir):
            os.makedirs(odir)
    # MJPG-encoded AVI writer for the rgb stream, shared through the cf module;
    # NOTE(review): assumes cf.WIDTH/cf.HEIGHT match the 640x480 rgb stream -- confirm.
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    cf.vid_out_rgb = cv2.VideoWriter(self.oDir + '/output_rgb.avi', fourcc,
                                     30.0, (cf.WIDTH, cf.HEIGHT))  # out video
def recordOni():
    """Record depth (640x480) + color (1280x720) into <path>.oni, keyboard-driven.

    Waits for 's' to start and 'q' to stop recording, then stops both streams.
    Relies on the module-level ``dev`` and ``path``.
    """
    depthStream = dev.create_depth_stream()
    colorStream = dev.create_color_stream()
    depthStream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
            resolutionX=640, resolutionY=480, fps=30))
    colorStream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
            resolutionX=1280, resolutionY=720, fps=30))
    # Use the explicit enum rather than the bare ``True`` the original passed:
    # OniImageRegistrationMode is OFF(0)/DEPTH_TO_COLOR(1), so True only worked
    # by numeric coincidence.
    dev.set_image_registration_mode(openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)
    dev.set_depth_color_sync_enabled(True)
    depthStream.set_mirroring_enabled(True)
    colorStream.set_mirroring_enabled(True)
    depthStream.start()
    colorStream.start()
    recorder = openni2.Recorder((path + ".oni").encode('utf-8'))
    recorder.attach(depthStream)
    recorder.attach(colorStream)
    print("Press 's' to start recording")
    keyboard.wait("s")
    print()
    recorder.start()
    print("Recording...\nPress 'q' to stop recording")
    keyboard.wait("q")
    print()
    recorder.stop()
    depthStream.stop()
    colorStream.stop()
def __init__(self):
    """Open the device and start synchronized, registered 320x240 depth + color streams."""
    openni2.initialize(
        "/home/pi/Downloads/Linux/OpenNI-Linux-Arm-2.3/Redist")
    self.dev = openni2.Device.open_any()
    self.depth_stream = self.dev.create_depth_stream()
    self.color_stream = self.dev.create_color_stream()
    self.dev.set_depth_color_sync_enabled(True)
    # BUGFIX: configure the depth video mode *before* starting the stream;
    # the original started it first, so the mode change could be rejected.
    self.depth_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
            resolutionX=320, resolutionY=240, fps=30))
    self.depth_stream.start()
    self.color_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
            resolutionX=320, resolutionY=240, fps=30))
    self.color_stream.start()
    self.dev.set_image_registration_mode(
        openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)
def __init__(self, width=320, height=240, fps=30):
    """Open a device (initialising OpenNI if needed) and start an RGB stream."""
    if not openni2.is_initialized():
        openni2.initialize("/usr/lib/")
    self.width = width
    self.height = height
    # maybe change this to a more specific device
    self.device = openni2.Device.open_any()
    stream = self.device.create_color_stream()
    stream.set_video_mode(c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
        resolutionX=width, resolutionY=height, fps=fps))
    stream.start()
    self.rgb_stream = stream
def __init__(self, width=320, height=240, fps=30):
    """Open a device (initialising OpenNI if needed) and start a depth stream.

    Depth is delivered in 100 um units at width x height @ fps.
    """
    if not openni2.is_initialized():
        openni2.initialize("/usr/lib/")
    self.width = width
    self.height = height
    # maybe change this to a more specific device
    self.device = openni2.Device.open_any()
    self.depth_stream = self.device.create_depth_stream()
    self.depth_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM,
            resolutionX=width,
            resolutionY=height,
            fps=fps))  # BUGFIX: honour the fps parameter (was hard-coded 30)
    # self.depth_stream.set_mirroring_enabled(False)
    self.depth_stream.start()
def run(self):
    """Grab 320x240 RGB frames, convert to BGR, and queue them until exit."""
    # Start camera stream.
    rgb_stream = self.device.create_color_stream()
    rgb_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
            resolutionX=320, resolutionY=240, fps=30))
    rgb_stream.start()

    # Get image continuously.
    while not gs.exit_signal:
        # BUGFIX: np.frombuffer replaces np.fromstring, which has been
        # deprecated for binary data since NumPy 1.14 and removed in current
        # releases (the sibling depth thread already uses frombuffer).
        img = np.frombuffer(rgb_stream.read_frame().get_buffer_as_uint8(),
                            dtype=np.uint8).reshape(240, 320, 3)
        bgr = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        put_to_queue_no_wait_no_block(bgr, gs.rgb_frames)
def kinectInit(logIP, logPort):
    """Connect to the logger via rpyc, then open and verify the Kinect depth stream.

    Exits the process (SystemExit) if either the logger connection or the
    depth capture fails; OpenNI is unloaded best-effort on capture failure.
    """
    global navManager, depth_stream
    try:
        navManager = rpyc.connect(logIP, logPort)
        navManager.root.connectionStatus("kinect", True)
        log("logger connection established")
    except:
        print(
            f"kinect - could not establish connection to logger {sys.exc_info()[0]}"
        )
        raise SystemExit()

    # try to capture the depth data
    try:
        openni2.initialize("C:/Program Files (x86)/OpenNI2/Redist/")
        dev = openni2.Device.open_any()
        depth_stream = dev.create_depth_stream()
        # BUGFIX: set the video mode *before* starting the stream; OpenNI does
        # not honour mode changes on a stream that is already running.
        depth_stream.set_video_mode(
            c_api.OniVideoMode(
                pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM,
                resolutionX=640, resolutionY=480, fps=30))
        depth_stream.start()
        frame = depth_stream.read_frame()
        log("depth data successfully captured")
        navManager.root.processStatus("kinect", True)
    except:
        log(f"capturing depth data failed {sys.exc_info()[0]}, 12 V on??")
        try:
            openni2.unload()
        except:
            pass
        raise SystemExit()
def init(w, h, f):
    """Open a device and start color + depth streams; depth runs at w x h @ f fps.

    Results are published through the module-level width/height/fps/stream globals.
    """
    global depth_width, depth_height, depth_fps, depth_stream, color_stream
    depth_width, depth_height, depth_fps = w, h, f

    dev = openni2.Device.open_any()
    print(dev.get_device_info())

    depth_stream = dev.create_depth_stream()
    color_stream = dev.create_color_stream()

    # Depth: 1 mm units, unmirrored; color keeps its default mode.
    mode = c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
        resolutionX=depth_width, resolutionY=depth_height, fps=depth_fps)
    depth_stream.set_video_mode(mode)
    depth_stream.set_mirroring_enabled(False)

    color_stream.start()
    depth_stream.start()
# Module-level camera bring-up (Python 2 style prints): initialise OpenNI from
# the driver path in ``dist``, open a device, and start synchronized,
# depth-to-color registered rgb + depth streams.
openni2.initialize(dist)
if (openni2.is_initialized()):
    print "openNI2 initialized"
else:
    print "openNI2 not initialized"

# Register the device
prime = openni2.Device.open_any()

# Create the streams
rgb_stream = prime.create_color_stream()
depth_stream = prime.create_depth_stream()

# Configure the depth_stream -- changes automatically based on bus speed
# print 'Depth video mode info', depth_stream.get_video_mode()  # Checks depth video configuration
# NOTE(review): only the depth stream gets an explicit mode here; the rgb
# stream runs with the driver default -- confirm that is intended.
depth_stream.set_video_mode(
    c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
        resolutionX=320,
        resolutionY=240,
        fps=30))

# Start the streams
rgb_stream.start()
depth_stream.start()

# Synchronize the streams
prime.set_depth_color_sync_enabled(True)

# IMPORTANT: ALIGN DEPTH2RGB (depth wrapped to match rgb stream)
prime.set_image_registration_mode(openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)


def get_rgb():
    """
    Returns numpy 3L ndarray to represent the rgb image.
    """
def main(argv=None):
    """Capture RGB-D frames from an OpenNI camera, display them side by side,
    and on 's' save cropped RGB/depth images plus a generated .ply point cloud.

    ESC terminates the loop; streams and windows are released on exit.
    """
    print('Hello! This is XXXXXX Program')

    ## Load PointNet config
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default='./seg/seg_model_1.pth', help='model path')
    opt = parser.parse_args()
    print(opt)

    ## Load PointNet model (segmentation head, 10 classes, fixed point count)
    num_points = 2700
    classifier = PointNetDenseCls(num_points=num_points, k=10)
    classifier.load_state_dict(torch.load(opt.model))
    classifier.eval()

    ### Config visualization
    cmap = plt.cm.get_cmap("hsv", 5)
    cmap = np.array([cmap(i) for i in range(10)])[:, :3]
    # gt = cmap[seg - 1, :]

    ## Initialize OpenNi from the bundled driver redistributable
    # dist = './driver/OpenNI-Linux-x64-2.3/Redist'
    dist = './driver/OpenNI-Windows-x64-2.3/Redist'
    openni2.initialize(dist)
    if (openni2.is_initialized()):
        print("openNI2 initialized")
    else:
        print("openNI2 not initialized")

    ## Register the device
    dev = openni2.Device.open_any()

    ## Create the streams stream
    rgb_stream = dev.create_color_stream()
    depth_stream = dev.create_depth_stream()

    ## Define stream parameters
    w = 320
    h = 240
    fps = 30

    ## Configure the rgb_stream -- changes automatically based on bus speed
    rgb_stream.set_video_mode(
        c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
                           resolutionX=w, resolutionY=h, fps=fps))

    ## Configure the depth_stream -- changes automatically based on bus speed
    # print 'Depth video mode info', depth_stream.get_video_mode()  # Checks depth video configuration
    depth_stream.set_video_mode(
        c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
                           resolutionX=w, resolutionY=h, fps=fps))

    ## Check and configure the mirroring -- default is True
    ## Note: I disable mirroring
    # print 'Mirroring info1', depth_stream.get_mirroring_enabled()
    depth_stream.set_mirroring_enabled(False)
    rgb_stream.set_mirroring_enabled(False)

    ## Start the streams
    rgb_stream.start()
    depth_stream.start()

    ## Synchronize the streams
    dev.set_depth_color_sync_enabled(True)  # synchronize the streams

    ## IMPORTANT: ALIGN DEPTH2RGB (depth wrapped to match rgb stream)
    dev.set_image_registration_mode(openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)

    # Output layout for captures (created only if the root is missing).
    saving_folder_path = './shapenetcore_partanno_segmentation_benchmark_v0/tools/'
    if not os.path.exists(saving_folder_path):
        os.makedirs(saving_folder_path+'RGB')
        os.makedirs(saving_folder_path+'D')
        os.makedirs(saving_folder_path+'PC')
        os.makedirs(saving_folder_path+'points')
        os.makedirs(saving_folder_path+'points_label')

    from config import CAMERA_CONFIG

    ## main loop
    s = 1000
    done = False
    while not done:
        key = cv2.waitKey(1) & 255
        ## Read keystrokes
        if key == 27:  # terminate
            print("\tESC key detected!")
            done = True
        elif chr(key) == 's':  # screen capture
            # NOTE(review): ``rgb``/``dmap`` are read here but only assigned
            # further down in the loop body -- pressing 's' on the very first
            # iteration would raise UnboundLocalError. Confirm intended order.
            print("\ts key detected. Saving image {}".format(s))
            rgb = rgb[60:180, 80:240, :]
            dmap = dmap[60:180, 80:240]
            ply_content, points_content = generate_ply_from_rgbd(rgb=rgb, depth=dmap, config=CAMERA_CONFIG)
            cv2.imwrite(saving_folder_path + "RGB/" + str(s) + '.png', rgb)
            cv2.imwrite(saving_folder_path + "D/" + str(s) + '.png', dmap)
            print(rgb.shape, dmap.shape)
            print(type(rgb), type(dmap))
            with open(saving_folder_path + "PC/" + str(s) + '.ply', 'w') as output:
                output.write(ply_content)
            print(saving_folder_path + "PC/" + str(s) + '.ply', ' done')
            s += 1  # uncomment for multiple captures
            # ### Get pointcloud of scene for prediction
            # points_np = (np.array(points_content)[:, :3]).astype(np.float32)
            # choice = np.random.choice(len(points_np), num_points, replace=True)
            # points_np = points_np[choice, :]
            # points_torch = torch.from_numpy(points_np)
            #
            # points_torch = points_torch.transpose(1, 0).contiguous()
            #
            # points_torch = Variable(points_torch.view(1, points_torch.size()[0], points_torch.size()[1]))
            #
            # ### Predict to segment scene
            # pred, _ = classifier(points_torch)
            # pred_choice = pred.data.max(2)[1]
            # print(pred_choice)

        ## Streams
        # RGB
        rgb = get_rgb(rgb_stream=rgb_stream, h=h, w=w)
        # DEPTH
        dmap, d4d = get_depth(depth_stream=depth_stream, h=h, w=w)
        # canvas
        canvas = np.hstack((rgb, d4d))
        ## Display the stream syde-by-side
        cv2.imshow('depth || rgb', canvas)
    # end while

    ## Release resources
    cv2.destroyAllWindows()
    rgb_stream.stop()
    depth_stream.stop()
    openni2.unload()
    print("Terminated")