def __init__(self, video_path, is_color, slider):
    super().__init__()
    self.is_color = is_color
    self.is_paused = False
    self.current_frame = 2  # cap2.oni only starts playing from the 2nd frame
    self.slider: QSlider = slider
    self.dev = openni2.Device.open_file(video_path.encode('utf-8'))
    self.pbs = openni2.PlaybackSupport(self.dev)
    self.image_stream = openni2.VideoStream(
        self.dev, SENSOR_COLOR if is_color else SENSOR_DEPTH)
    self.image_stream.start()
def openDevice(video_path):
    try:
        if sys.platform == "win32":
            libpath = "lib/Windows"
        else:
            libpath = "lib/Linux"
        openni2.initialize(libpath)
        dev = openni2.Device.open_file(video_path)
        pbs = openni2.PlaybackSupport(dev)
        pbs.set_repeat_enabled(True)
        pbs.set_speed(-1.0)  # negative speed: frames are delivered on demand, not in real time
        return dev, pbs
    except Exception as ex:
        print(ex)
        raise Exception("Initialization Error")
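# A minimal usage sketch for openDevice (not part of the original code): it assumes
# the bindings are imported as `from openni import openni2` (or `from primesense
# import openni2`, depending on the distribution) and that the .oni path is passed
# as bytes, as Device.open_file expects; the file name below is a placeholder.
from openni import openni2

dev, pbs = openDevice(b"recordings/sample.oni")
color_stream = dev.create_color_stream()
color_stream.start()
print("color frames:", pbs.get_number_of_frames(color_stream))
frame = color_stream.read_frame()                        # read on demand, since speed is -1.0
print("bytes per frame:", len(frame.get_buffer_as_uint8()))
color_stream.stop()
openni2.unload()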
def open_device(self):
    if self.is_open:
        self.stop_video()
    path = self.browse_folder()
    if path:
        self.device = self.device.open_file(path)
        self.depth_stream = self.device.create_depth_stream()
        self.color_stream = self.device.create_color_stream()
        self.num_depth_frames = self.depth_stream.get_number_of_frames()
        self.num_color_frames = self.color_stream.get_number_of_frames()
        self.playback_support = openni2.PlaybackSupport(self.device)
        self.horizontalSlider.setRange(2, self.num_depth_frames)
        self.is_open = True
        self.play_button.setEnabled(True)
        self.stop_button.setEnabled(True)
        self.next_button.setEnabled(True)
        self.prev_button.setEnabled(True)
def oni_converter(file_path):
    '''
    Convert an oni file to color and depth avi files.

    The avi files will be saved in the working directory as
    color.avi and depth.avi.

    Parameters
    ----------
    file_path : str
        full path to the oni file
    '''
    count = 0
    result = None
    PATH_TO_OPENNI2_SO_FILE = './OpenNI-Linux-x64-2.2/Redist'
    t1_start = process_time()
    openni2.initialize(PATH_TO_OPENNI2_SO_FILE)
    dev = openni2.Device.open_file(file_path.encode('utf-8'))
    c_stream, d_stream = dev.create_color_stream(), dev.create_depth_stream()
    openni2.PlaybackSupport(dev).set_speed(ctypes.c_float(0.0))  # 0.0: play back as fast as possible
    d_stream.start()
    c_stream.start()
    c_images, d_images = [], []
    n_c_frames = openni2.PlaybackSupport(
        dev).get_number_of_frames(c_stream) - 1
    n_d_frames = openni2.PlaybackSupport(dev).get_number_of_frames(d_stream)
    if n_c_frames != n_d_frames:
        print('Different number of frames in the color and depth streams!\n'
              f'color - {n_c_frames}, depth - {n_d_frames}')
        sys.exit()
    for i in tqdm(range(n_c_frames)):
        # process depth stream
        d_frame = np.frombuffer(d_stream.read_frame().get_buffer_as_uint16(),
                                dtype=np.uint16).reshape(480, 640)
        # Correct the range: depth images are 12 bits, scale them to the 8-bit range
        d_img = np.uint8(d_frame.astype(float) * 255 / (2**12 - 1))
        d_img = cv2.cvtColor(d_img, cv2.COLOR_GRAY2RGB)
        d_img = 255 - d_img
        d_images.append(d_img)
        if i == 0:
            continue
        # process color stream
        c_frame = c_stream.read_frame()
        c_frame_data = c_frame.get_buffer_as_uint8()
        c_img_bgr = np.frombuffer(c_frame_data, dtype=np.uint8)
        c_img_bgr.shape = (480, 640, 3)
        c_img_rgb = cv2.cvtColor(c_img_bgr, cv2.COLOR_BGR2RGB)
        c_images.append(c_img_rgb)
        count += 1
        # yield count
    openni2.unload()
    # TODO: make fps a parameter instead of the hard-coded 30
    c_out = cv2.VideoWriter(
        'color.avi', cv2.VideoWriter_fourcc(*'DIVX'), 30, (640, 480))
    d_out = cv2.VideoWriter(
        'depth.avi', cv2.VideoWriter_fourcc(*'DIVX'), 30, (640, 480))
    for i in tqdm(range(len(c_images))):
        c_out.write(c_images[i])
        d_out.write(d_images[i])
        count += 1
        # yield count
    c_out.release()
    d_out.release()
    t1_stop = process_time()
    print(f"Process duration, seconds: {round(t1_stop - t1_start, 3)}")
    return "ONI file has been processed successfully."
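# Hedged example of driving oni_converter from a script entry point; the .oni path
# below is purely illustrative, and the OpenNI2 redistributable path hard-coded
# inside the function must exist relative to the working directory.
if __name__ == '__main__':
    message = oni_converter('/data/recordings/session_01.oni')
    print(message)  # -> "ONI file has been processed successfully."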
def play(name):
    filename = os.path.join('data', name)
    recorder_exposure = open(filename + '_exposure.txt', 'r')
    frame_exposure = open(filename + '_frame_exp.txt', 'w')
    dev = openni2.Device.open_file((filename + '.oni').encode('utf-8'))
    pc = openni2.PlaybackSupport(dev)
    pc.set_speed(-1.0)
    pc.set_repeat_enabled(False)
    print(dev.get_device_info())
    depth_stream = dev.create_depth_stream()
    depth_mode = depth_stream.get_video_mode()
    depth_stream.start()
    print(depth_mode)
    color_stream = dev.create_color_stream()
    color_mode = color_stream.get_video_mode()
    color_stream.start()
    print(color_mode)
    print('Replay Start')
    is_end = False
    g = np.loadtxt(filename + '_curve.txt')
    img_cnt = 0
    exposure = 64
    exposure_next = int(recorder_exposure.readline())
    last_median = [0, 0]
    for t in range(pc.get_number_of_frames(color_stream)):
        _, _, is_end, _, image_color = show_image(depth_stream, color_stream)
        if is_end:
            break
        curr_median = [
            np.percentile(image_color, 25) + 5,
            np.percentile(image_color, 75) + 5
        ]
        if (img_cnt > 1 and
                ((exposure_next > exposure or exposure == 0) and
                 (curr_median[1] / last_median[1] > 1.2 or
                  curr_median[0] / last_median[0] > 1.2)) or
                ((exposure_next < exposure or exposure == 0) and
                 (last_median[1] / curr_median[1] > 1.2 or
                  last_median[0] / curr_median[0] > 1.2))):
            img_cnt = 0
            exposure = exposure_next
            exposure_next = recorder_exposure.readline()
            if exposure_next == '':
                is_end = True
            else:
                exposure_next = int(exposure_next)
            print(exposure)
        frame_exposure.write(str(exposure) + '\n')
        frame_exposure.flush()
        last_median[0] = curr_median[0]
        last_median[1] = curr_median[1]
        print('\t', img_cnt)
        img_cnt += 1
        if exposure > 0:
            image_color = np.exp(g[image_color]) / exposure * 10
        cv2.imshow('linear', image_color)
        cv2.waitKey(1)
    recorder_exposure.close()
    frame_exposure.close()
    depth_stream.stop()
    color_stream.stop()
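# Illustrative call of play() (an assumption, not from the original code): it expects
# data/<name>.oni, data/<name>_exposure.txt and data/<name>_curve.txt to exist, relies
# on a show_image() helper defined elsewhere, and needs openni2 to be initialized
# beforehand because play() opens the device directly. 'cap2' is only an example name,
# echoing the recording mentioned in the __init__ snippet above.
openni2.initialize('lib/Linux')   # redistributable path assumed, as in openDevice
play('cap2')
openni2.unload()
cv2.destroyAllWindows()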