Example #1
    def colorStream(self, pathDir:str):
        color_stream = self.dev.create_color_stream()
        # Configure the video mode before starting the stream
        color_stream.set_video_mode( c_api.OniVideoMode(pixelFormat = c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888, resolutionX = 640,
                                                        resolutionY = 480, fps = 30))
        color_stream.start()

        while True:
            frame = color_stream.read_frame()
            frame_data = frame.get_buffer_as_uint8()

            # The RGB888 buffer is R, G, B per pixel; split the channels
            img = np.frombuffer(frame_data, dtype=np.uint8)
            img.shape = (307200, 3)
            r = img[:, 0].reshape(480, 640)
            g = img[:, 1].reshape(480, 640)
            b = img[:, 2].reshape(480, 640)

            # Reassemble as BGR, the channel order OpenCV expects
            bgr = (b[..., np.newaxis], g[..., np.newaxis], r[..., np.newaxis])
            img = np.concatenate(bgr, axis=-1)

            cv2.imshow("Color Image", img)
            key = cv2.waitKey(1) & 0xFF

            if key == ord("q"):
                break

            if key == ord("c"):
                filename = pathDir + str(datetime.datetime.now()) + ".png"
                cv2.imwrite(filename, img)

        openni2.unload()
        cv2.destroyAllWindows()
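The three-way channel split above can be collapsed into a single reshape; a minimal equivalent sketch, assuming the same 640x480 RGB888 mode:

    img = np.frombuffer(frame_data, dtype=np.uint8).reshape(480, 640, 3)
    bgr = img[:, :, ::-1]  # reverse the last axis: RGB -> BGR for cv2.imshow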
Example #2
    def __init__(self):

        ######### Register the device##################
        openni2.initialize()
        self.dev = openni2.Device.open_any()
        ########## create the streams ###########
        #self.rgb_stream = self.dev.create_color_stream()
        self.depth_stream = self.dev.create_depth_stream()
        #self.color_stream = self.dev.create_color_stream()
        self.dev.set_depth_color_sync_enabled(True)  # synchronize the streams
        self.dev.set_image_registration_mode(
            openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)
        # set_video_mode returns None, so there is nothing useful to assign here
        self.depth_stream.set_video_mode(
            c_api.OniVideoMode(
                pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
                resolutionX=640,
                resolutionY=400,
                fps=30))
        #         self.depth_stram = self.depth_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM, resolutionX=640, resolutionY=480,fps=30))
        #self.color_stream = self.color_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,resolutionX=640,resolutionY=480,fps=30))
        ######### start the stream #############
        self.depth_stream.start()
        #self.rgb_stream.start()

        ###########################################
        ################Mouth###############
        predictor_path = '/home/odroid/feeding20190221_Hui/shape_predictor_68_face_landmarks.dat'
        self.detector = dlib.get_frontal_face_detector()  # detects frontal faces
        self.predictor = dlib.shape_predictor(predictor_path)  # registers the 68 facial landmarks

        self.mouth_status = 0
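A minimal read from the depth stream configured above, a sketch assuming numpy is imported as np; with DEPTH_1_MM units the pixel values are millimeters:

    frame = self.depth_stream.read_frame()
    depth_mm = np.ndarray((frame.height, frame.width), dtype=np.uint16,
                          buffer=frame.get_buffer_as_uint16())
    depth_m = depth_mm.astype(np.float32) / 1000.0  # millimeters -> meters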
Example #3
    def depthStream(self):
        depth_stream = self.dev.create_depth_stream()
        # Configure the video mode before starting the stream
        depth_stream.set_video_mode( c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM,
                                                        resolutionX=640, resolutionY=480, fps=30))
        depth_stream.start()

        while True:
            # Grab a new depth frame
            frame = depth_stream.read_frame()
            frame_data = frame.get_buffer_as_uint16()
            # Put the depth frame into a numpy array and reshape it
            img = np.frombuffer(frame_data, dtype=np.uint16)
            img.shape = (480, 640)
            # Duplicate the single channel into a 3-channel image for display
            img = np.dstack((img, img, img))

            # Display the reshaped depth frame using OpenCV
            cv2.imshow("Depth Image", img)
            key = cv2.waitKey(1) & 0xFF

            # If the 'q' key is pressed, break the while loop
            if key == ord("q"):
                break

        # Close all windows and unload the depth device
        openni2.unload()
        cv2.destroyAllWindows()
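cv2.imshow divides 16-bit pixels by 256 for display, so with 100 um units anything nearer than about 6.5 m renders very dark. A minimal rescaling sketch for a clearer preview, using the img array from the loop above:

    img8 = cv2.convertScaleAbs(img, alpha=255.0 / max(int(img.max()), 1))
    cv2.imshow("Depth Image", img8)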
Example #4
    def __init__(self, cam_id="rtsp://admin:@192.168.52.52/h265/main/av_stream"):
        self._cam_id = 0  # the RTSP cam_id is ignored here in favor of local camera 0
        self._fps = 30
        try:
            self._cap = cv2.VideoCapture(self._cam_id)
            self._cap.set(cv2.CAP_PROP_FPS, self._fps)
            self._height = self._cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
            self._width = self._cap.get(cv2.CAP_PROP_FRAME_WIDTH)
            self._cap.set(cv2.CAP_PROP_BRIGHTNESS, 0)
            print(self._cap.get(cv2.CAP_PROP_BRIGHTNESS))
            print(self._cap.set(cv2.CAP_PROP_BACKLIGHT, -64))
            openni2.initialize()
            self.device = openni2.Device.open_any()
            print('initialized')
            self.depth_stream = self.device.create_depth_stream()
            self.depth_stream.set_mirroring_enabled(False)
            self.color_stream = self.device.create_color_stream()
            self.color_stream.set_mirroring_enabled(False)
            self.device.set_depth_color_sync_enabled(True)
            # Configure the video mode before starting the streams
            self.depth_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM, resolutionX=640, resolutionY=480, fps=self._fps))
            self.depth_stream.start()
            self.color_stream.start()
            # self.color_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM, resolutionX=640, resolutionY=480, fps=1))
        except Exception as e:
            print(e)
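A companion read method for this class is not shown; a minimal sketch, assuming the attribute names above and numpy imported as np (the uint16 ndarray pattern matches the other examples on this page):

    def read(self):
        frame = self.depth_stream.read_frame()
        depth = np.ndarray((frame.height, frame.width), dtype=np.uint16,
                           buffer=frame.get_buffer_as_uint16())
        ok, color = self._cap.read()  # UVC color image via OpenCV
        return depth, (color if ok else None)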
Example #5
    def __init__(self, debug=False):
        self.debug = debug
        self._scale_depth = True
        self._scale_rgb_colors = False

        openni2.initialize("/usr/lib")

        dev = openni2.Device.open_any()
        if debug:
            print(dev.get_device_info())

        dev.set_image_registration_mode(openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)  # the enum value is 1, so passing True also works
        dev.set_depth_color_sync_enabled(True)

        # create depth stream
        self.depth_stream = dev.create_depth_stream()
        self.depth_stream.set_video_mode(
            c_api.OniVideoMode(
                pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
                resolutionX=Depth_ResX,
                resolutionY=Depth_ResY,
                fps=Depth_fps,
            ))
        depth_sensor_info = self.depth_stream.get_sensor_info()

        self.max_depth = self.depth_stream.get_max_pixel_value()
        self.min_depth = 0

        if self.debug:
            for itm in depth_sensor_info.videoModes:
                print(itm)
            print("Min depth value: {}".format(self.min_depth))
            print("Max depth value: {}".format(self.max_depth))

        self.rgb_stream = dev.create_color_stream()
        self.rgb_stream.set_video_mode(
            c_api.OniVideoMode(
                pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
                resolutionX=RGB_ResX,
                resolutionY=RGB_ResY,
                fps=RGB_fps,
            ))

        if self.debug:
            rgb_sensor_info = self.rgb_stream.get_sensor_info()
            for itm in rgb_sensor_info.videoModes:
                print(itm)
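A matching frame getter is not shown; a minimal sketch, assuming depth_stream.start() has been called and numpy is imported as np; self.max_depth is the value queried from get_max_pixel_value() above:

    def get_depth_normalized(self):
        frame = self.depth_stream.read_frame()
        depth = np.ndarray((frame.height, frame.width), dtype=np.uint16,
                           buffer=frame.get_buffer_as_uint16())
        # Scale to [0, 1] using the sensor's reported maximum
        return depth.astype(np.float32) / self.max_depth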
Example #6
def main(args):
    device = getOrbbec()
    # Create the depth stream
    depth_stream = device.create_depth_stream()
    depth_stream.set_mirroring_enabled(args.mirroring)
    depth_stream.set_video_mode(
        c_api.OniVideoMode(
            resolutionX=args.width,
            resolutionY=args.height,
            fps=args.fps,
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM))

    # Grab the UVC color camera via OpenCV
    cap = cv2.VideoCapture(0)

    # Enable registration and depth/color frame sync
    device.set_image_registration_mode(True)
    device.set_depth_color_sync_enabled(True)
    depth_stream.start()

    while True:
        # Read a depth frame
        frame_depth = depth_stream.read_frame()
        frame_depth_data = frame_depth.get_buffer_as_uint16()
        # Wrap the frame's depth buffer in a numpy array, also usable for back-end processing
        depth_array = np.ndarray((frame_depth.height, frame_depth.width),
                                 dtype=np.uint16,
                                 buffer=frame_depth_data)
        # Remap to inverse depth for OpenCV display; despite its name the result
        # is float, and zero-depth pixels (inf after division) are clipped below
        depth_uint8 = 1 - 250 / depth_array
        depth_uint8[depth_uint8 > 1] = 1
        depth_uint8[depth_uint8 < 0] = 0
        cv2.imshow('depth', depth_uint8)

        # Read the color image
        _, color_array = cap.read()
        cv2.imshow('color', color_array)

        # Process the color image color_array here

        # Process the depth image depth_array here

        # Keyboard listener
        if cv2.waitKey(1) == ord('q'):
            # Close the windows and the camera
            depth_stream.stop()
            cap.release()
            cv2.destroyAllWindows()
            break

    # Check whether the device unloaded (not strictly necessary)
    try:
        openni2.unload()
        print("Device unloaded \n")
    except Exception as ex:
        print("Device not unloaded: ", ex, "\n")
Example #7
    def initialize_color_stream(self):
        self.color_stream = self.dev.create_color_stream()
        # Configure the video mode before starting the stream
        self.color_stream.set_video_mode(
            c_api.OniVideoMode(
                pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
                resolutionX=640,
                resolutionY=480,
                fps=30))
        self.color_stream.start()

        if self.noMirror:
            self.color_stream.set_mirroring_enabled(False)
Example #8
    def initialize_depth_stream(self):
        self.depth_stream = self.dev.create_depth_stream()
        # Configure the video mode before starting the stream
        self.depth_stream.set_video_mode(
            c_api.OniVideoMode(
                pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM,
                resolutionX=640,
                resolutionY=480,
                fps=30))
        self.depth_stream.start()

        if self.noMirror:
            self.depth_stream.set_mirroring_enabled(False)
        self.dev.set_image_registration_mode(
            openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)
Example #9
    def _start_depth_stream(self) -> openni2.VideoStream:
        if self.dev is not None:
            depth_stream = self.dev.create_depth_stream()
        else:
            raise Exception("Initialize AstraCamera using with statement.")

        if depth_stream is None:
            raise Exception("Current device has not a depth sensor.")

        depth_stream.set_video_mode(
            c_api.OniVideoMode(
                pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM,
                resolutionX=PROPERTIES["width"],
                resolutionY=PROPERTIES["height"],
                fps=PROPERTIES["fps"],
            ))

        depth_stream.set_mirroring_enabled(False)
        depth_stream.start()  # start once, after the mode and mirroring are set

        self.depth_stream = depth_stream

        return depth_stream
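The exception above expects the class to be used as a context manager; a minimal sketch of what __enter__/__exit__ might look like (everything beyond the two dunder names is an assumption):

    def __enter__(self):
        openni2.initialize()
        self.dev = openni2.Device.open_any()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        stream = getattr(self, "depth_stream", None)  # hypothetical cleanup
        if stream is not None:
            stream.stop()
        openni2.unload()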
Example #10
import numpy as np
import cv2
import sys
from openni import openni2
from openni import _openni2 as c_api
# The QT application and GL view below come from pyqtgraph
from pyqtgraph.Qt import QtGui
import pyqtgraph.opengl as gl


# Initialize the depth device
openni2.initialize()
dev = openni2.Device.open_any()

# Start the depth stream
depth_stream = dev.create_depth_stream()
depth_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM, resolutionX=640, resolutionY=480, fps=30))
depth_stream.start()  # start after the mode is set

# Start the color stream
color_stream = dev.create_color_stream()
color_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888, resolutionX=640, resolutionY=480, fps=30))
color_stream.start()  # start after the mode is set

#undistorted = Frame(640, 480, 4)
#registered = Frame(640, 480, 4)

#QT app
app = QtGui.QApplication([])
w = gl.GLViewWidget()
w.show()
g = gl.GLGridItem()
w.addItem(g)
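A sketch of pushing depth pixels into the GL view as a raw point cloud, assuming the stream above is running; the (x, y, depth) layout skips the camera intrinsics, so this is a quick preview rather than a metric cloud:

frame = depth_stream.read_frame()
depth = np.ndarray((frame.height, frame.width), dtype=np.uint16,
                   buffer=frame.get_buffer_as_uint16())
ys, xs = np.nonzero(depth)  # keep only pixels with a depth reading
pts = np.column_stack((xs, ys, depth[ys, xs])).astype(np.float32)
scatter = gl.GLScatterPlotItem(pos=pts, size=1)
w.addItem(scatter)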
Example #11
def main():
    # Init openni
    openni_dir = "../OpenNI_2.3.0.55/Linux/OpenNI-Linux-x64-2.3.0.55/Redist"
    openni2.initialize(openni_dir)

    # Open astra color stream (using opencv)
    color_capture = cv.VideoCapture(2)
    color_capture.set(cv.CAP_PROP_FPS, 5)

    # Open astra depth stream (using openni)
    depth_device = openni2.Device.open_any()
    depth_stream = depth_device.create_depth_stream()
    depth_stream.set_video_mode(
        _openni2.OniVideoMode(
            pixelFormat=_openni2.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
            resolutionX=640,
            resolutionY=480,
            fps=5))
    depth_stream.start()  # start after the mode is set
    depth_device.set_image_registration_mode(
        openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)

    # Create pointcloud visualizer
    visualizer = open3d.visualization.Visualizer()
    visualizer.create_window("Pointcloud", width=1000, height=700)

    # Camera intrinsics of the astra pro
    astra_camera_intrinsics = open3d.camera.PinholeCameraIntrinsic(width=640,
                                                                   height=480,
                                                                   fx=585,
                                                                   fy=585,
                                                                   cx=320,
                                                                   cy=250)

    # Create initial pointcloud
    pointcloud = open3d.geometry.PointCloud()
    visualizer.add_geometry(pointcloud)

    first = True
    prev_timestamp = 0
    num_stored = 0

    while True:
        rgbd = get_rgbd(color_capture, depth_stream)

        # Convert images to pointcloud
        new_pointcloud = open3d.geometry.PointCloud.create_from_rgbd_image(
            rgbd,
            intrinsic=astra_camera_intrinsics,
        )
        # Flip pointcloud
        new_pointcloud.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0],
                                  [0, 0, 0, 1]])
        # Set rendered pointcloud to recorded pointcloud
        pointcloud.points = new_pointcloud.points
        pointcloud.colors = new_pointcloud.colors

        # Save pointcloud each n seconds
        if SAVE_POINTCLOUDS and time.time() > prev_timestamp + 5:
            filename = "pointcloud-%r.pcd" % num_stored
            open3d.io.write_point_cloud(filename, new_pointcloud)
            num_stored += 1
            print("Stored: %s" % filename)
            prev_timestamp = time.time()

        # Reset viewpoint in first frame to look at the scene correctly
        # (e.g. correct bounding box, direction, distance, etc.)
        if first:
            visualizer.reset_view_point(True)
            first = False

        # Update visualizer
        visualizer.update_geometry()
        visualizer.poll_events()
        visualizer.update_renderer()
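The get_rgbd helper is not shown in this snippet; a plausible sketch, assuming numpy as np and Open3D's standard RGBD constructor (the helper body is an assumption; depth_scale=1000.0 matches the 1 mm units configured above):

def get_rgbd(color_capture, depth_stream):
    _, color = color_capture.read()
    color = cv.cvtColor(color, cv.COLOR_BGR2RGB)
    frame = depth_stream.read_frame()
    depth = np.ndarray((frame.height, frame.width), dtype=np.uint16,
                       buffer=frame.get_buffer_as_uint16())
    return open3d.geometry.RGBDImage.create_from_color_and_depth(
        open3d.geometry.Image(color),
        open3d.geometry.Image(depth),
        depth_scale=1000.0,  # DEPTH_1_MM: 1000 units per meter
        convert_rgb_to_intensity=False)  # keep colors for pointcloud.colors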
Example #12
        self.vtkPolyData.SetVerts(self.vtkCells)
        self.vtkPolyData.GetPointData().SetScalars(self.vtkDepth)
        self.vtkPolyData.GetPointData().SetActiveScalars('DepthArray')


# Initialize the depth device
openni2.initialize()
dev = openni2.Device.open_any()

# Start the depth stream
depth_stream = dev.create_depth_stream()
depth_stream.set_video_mode(
    c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM,
        resolutionX=640,
        resolutionY=480,
        fps=30))
depth_stream.start()  # start after the mode is set

# Start the color stream
color_stream = dev.create_color_stream()
color_stream.set_video_mode(
    c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
        resolutionX=640,
        resolutionY=480,
        fps=30))
color_stream.start()  # start after the mode is set

# Function to return some pixel information when the OpenCV window is clicked
refPt = []
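The click handler itself is cut off in this snippet; a hypothetical sketch of such a callback, using OpenCV's mouse API and the refPt list above:

def on_mouse(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        refPt.append((x, y))
        print("Clicked pixel:", x, y)

# Registered on the preview window, e.g.:
# cv2.namedWindow("Color Image")
# cv2.setMouseCallback("Color Image", on_mouse)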
Example #13
def write_files(dev):

    if debug:
        import cv2
        import numpy as np
    depth_stream = dev.create_depth_stream()
    color_stream = dev.create_color_stream()
    depth_stream.set_mirroring_enabled(mirroring)
    color_stream.set_mirroring_enabled(mirroring)
    depth_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
            resolutionX=width,
            resolutionY=height,
            fps=fps))
    color_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
            resolutionX=width,
            resolutionY=height,
            fps=fps))
    dev.set_image_registration_mode(True)
    dev.set_depth_color_sync_enabled(True)
    depth_stream.start()
    color_stream.start()

    actual_date = datetime.now().strftime("%Y%m%d-%H%M%S%f")[:-3]
    rec = openni2.Recorder((actual_date + ".oni").encode('utf-8'))
    rec.attach(depth_stream, compression)
    rec.attach(color_stream, compression)
    rec.start()
    print("Recording started.. press ctrl+C to stop or wait " + str(length) +
          " seconds..")
    start = time.time()
    depth_scale_factor = 0.05
    depth_scale_beta_factor = 0
    try:
        while True:
            if (time.time() - start) > length:
                break
            if debug:
                frame_color = color_stream.read_frame()
                frame_depth = depth_stream.read_frame()

                frame_color_data = frame_color.get_buffer_as_uint8()
                frame_depth_data = frame_depth.get_buffer_as_uint16()

                color_array = np.ndarray(
                    (frame_color.height, frame_color.width, 3),
                    dtype=np.uint8,
                    buffer=frame_color_data)
                color_array = cv2.cvtColor(color_array, cv2.COLOR_BGR2RGB)
                depth_array = np.ndarray(
                    (frame_depth.height, frame_depth.width),
                    dtype=np.uint16,
                    buffer=frame_depth_data)
                depth_uint8 = depth_array * depth_scale_factor + depth_scale_beta_factor
                depth_uint8[depth_uint8 > 255] = 255
                depth_uint8[depth_uint8 < 0] = 0
                depth_uint8 = depth_uint8.astype('uint8')

                cv2.imshow('depth', depth_uint8)
                cv2.imshow('color', color_array)
                cv2.waitKey(1)
    except KeyboardInterrupt:
        pass
    if debug:
        cv2.destroyAllWindows()  # cv2 is only imported when debug is set
    rec.stop()
    depth_stream.stop()
    color_stream.stop()
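To check a recording, the .oni file can be opened like a live device; a minimal sketch, assuming the binding's Device.open_file (the filename is a placeholder):

openni2.initialize()
dev = openni2.Device.open_file(b"20190101-120000000.oni")  # hypothetical file
depth_stream = dev.create_depth_stream()
depth_stream.start()
frame = depth_stream.read_frame()  # frames replay from the recording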
Example #14
def main(args):
    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    '''MODEL LOADING'''
    MODEL = importlib.import_module(args.model)  # import the module containing the model
    classifier = MODEL.get_model(args.num_joint * 3,
                                 normal_channel=args.normal).cuda()

    experiment_dir = Path('./log/regression/pointnet2_reg_msg')
    checkpoint = torch.load(
        str(experiment_dir) + '/checkpoints/best_model.pth')
    classifier.load_state_dict(checkpoint['model_state_dict'])
    classifier.eval()

    try:
        ''' Load the camera '''
        device = getOrbbec()
        # Create the depth stream
        depth_stream = device.create_depth_stream()
        depth_stream.set_mirroring_enabled(args.mirroring)
        depth_stream.set_video_mode(
            c_api.OniVideoMode(
                resolutionX=args.width,
                resolutionY=args.height,
                fps=args.fps,
                pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM))
        # Grab the UVC color camera via OpenCV
        cap = cv2.VideoCapture(0)

        # Enable registration and depth/color frame sync
        device.set_image_registration_mode(True)
        device.set_depth_color_sync_enabled(True)
        depth_stream.start()

        while True:
            # Read a depth frame
            frame_depth = depth_stream.read_frame()
            frame_depth_data = frame_depth.get_buffer_as_uint16()
            # Wrap the frame's depth buffer in a numpy array, also usable for back-end processing
            depth_array = np.ndarray((frame_depth.height, frame_depth.width),
                                     dtype=np.uint16,
                                     buffer=frame_depth_data)
            # Remap to inverse depth for OpenCV display; despite its name the result
            # is float, and zero-depth pixels (inf after division) are clipped below
            depth_uint8 = 1 - 250 / depth_array
            depth_uint8[depth_uint8 > 1] = 1
            depth_uint8[depth_uint8 < 0] = 0
            cv2.imshow('depth', depth_uint8)

            # Read the color image
            _, color_array = cap.read()
            cv2.imshow('color', color_array)

            # Process the color image color_array here

            # Process the depth image depth_array here

            # Keyboard listener
            if cv2.waitKey(1) == ord('q'):
                # Close the windows and the camera
                depth_stream.stop()
                cap.release()
                cv2.destroyAllWindows()
                break

        # Check whether the device unloaded (not strictly necessary)
        try:
            openni2.unload()
            print("Device unloaded \n")
        except Exception as ex:
            print("Device not unloaded: ", ex, "\n")

    except Exception:
        # Fall back to reading a stored depth image
        depth_array = matplotlib.image.imread('test_depth_1.tif').astype(
            np.float32)
        # depth to UVD
        cloud_point = depth2uvd(depth_array)
        # Normalize the point cloud
        cloud_point_normal, scale, centroid = pc_normalize(cloud_point)
        cloud_point_normal = np.reshape(cloud_point_normal, (1, -1, 3))
        cloud_point_normal = cloud_point_normal.transpose(0, 2, 1)
        # Run prediction on the normalized point cloud
        cloud_point_normal = torch.from_numpy(cloud_point_normal).cuda()
        pred, _ = classifier(cloud_point_normal)
        # Map the prediction back to the original scale
        pred_reduction = pred.cpu().data.numpy()
        pred_reduction = pred_reduction * np.tile(
            scale, (args.num_joint * 3, 1)).transpose(1, 0)
        pred_reduction = pred_reduction + np.tile(centroid,
                                                  (1, args.num_joint))
        pred_reduction = np.reshape(pred_reduction, (-1, 3))

        displayPoint2(cloud_point, pred_reduction, 'bear')
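pc_normalize is not shown here; judging from how scale and centroid are used to undo the normalization above, a plausible sketch is the standard PointNet-style unit-sphere normalization (the exact body is an assumption):

def pc_normalize(pc):
    centroid = np.mean(pc, axis=0)
    pc = pc - centroid
    scale = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))  # furthest point from the centroid
    pc = pc / scale
    return pc, scale, centroid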
Example #15
anchors_path = os.path.join(src_path, 'keras_yolo3', 'model_data',
                            'yolo_anchors.txt')

FLAGS = None

frame_cache = np.zeros((1, 240, 320))
temperature = np.array([0])
path = 'C:/Users/ELLAB_TEST/Python Projects/orbecc viewer/Orbecmini_321'

openni2.initialize("C:\Program Files\OpenNI-Windows-x64-2.3\Redist")
dev = openni2.Device.open_any()
depth_stream = dev.create_color_stream()
depth_stream.start()
depth_stream.set_video_mode(
    c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_YUYV,
                       resolutionX=640,
                       resolutionY=480,
                       fps=30))
cx = 0
cy = 0

if __name__ == '__main__':
    # Delete all default flags
    parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
    '''
    Command line options
    '''

    parser.add_argument(
        "--input_path",
        type=str,
        default=image_test_folder,