Code Example #1
File: palm.py Project: OAKChina/depthai-examples
def create_pipeline(video):
    pipeline = dai.Pipeline()
    palm = pipeline.create(dai.node.NeuralNetwork)
    palm.setBlobPath("models/palm.blob")
    palm.setNumInferenceThreads(2)
    palm.input.setBlocking(False)
    palm.input.setQueueSize(2)

    if video:
        palm_in = pipeline.create(dai.node.XLinkIn)
        palm_in.setStreamName("palm_in")
        palm_in.setMaxDataSize(128 * 128 * 3)
        palm_in.out.link(palm.input)
    else:
        cam = pipeline.create(dai.node.ColorCamera)
        cam.setResolution(
            dai.ColorCameraProperties.SensorResolution.THE_1080_P)
        cam.setBoardSocket(dai.CameraBoardSocket.RGB)
        cam.setInterleaved(False)
        cam.setPreviewSize(preview_size)

        cam_out = pipeline.create(dai.node.XLinkOut)
        cam_out.setStreamName("rgb")
        cam.preview.link(cam_out.input)
        manip = pipeline.create(dai.node.ImageManip)
        manip.initialConfig.setResize(128, 128)
        cam.preview.link(manip.inputImage)
        manip.out.link(palm.input)
        # cam.preview.link(face_det.input)

    palm_out = pipeline.create(dai.node.XLinkOut)
    palm_out.setStreamName("palm_nn")
    palm.out.link(palm_out.input)

    return pipeline
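
A pipeline like this is consumed on the host through dai.Device and its XLink queues. Below is a minimal sketch of the camera-mode run loop, assuming depthai and opencv-python are installed; note that preview_size is a module-level global in the source project, so a placeholder value is defined here:

import cv2
import depthai as dai

preview_size = (640, 640)  # assumption: the source project defines this globally

pipeline = create_pipeline(video=False)
with dai.Device(pipeline) as device:
    q_rgb = device.getOutputQueue("rgb", maxSize=4, blocking=False)
    q_palm = device.getOutputQueue("palm_nn", maxSize=4, blocking=False)
    while True:
        frame = q_rgb.get().getCvFrame()  # BGR preview frame
        palm_nn = q_palm.get()            # raw NNData; decoded by the project's host code
        cv2.imshow("rgb", frame)
        if cv2.waitKey(1) == ord('q'):
            break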
Code Example #2
    def create_pipeline(self, model_blob):
        self.pipeline = dai.Pipeline()

        face_detector_in = self.pipeline.createXLinkIn()
        face_detector_in.setStreamName("face_detector_in")

        face_detector = self.pipeline.createNeuralNetwork()
        face_detector.setBlobPath(model_blob["detector"])

        face_detector_out = self.pipeline.createXLinkOut()
        face_detector_out.setStreamName("face_detector_out")

        age_gender_in = self.pipeline.createXLinkIn()
        age_gender_in.setStreamName("age_gender_in")

        age_gender_nn = self.pipeline.createNeuralNetwork()
        age_gender_nn.setBlobPath(model_blob["age_gender"])

        age_gender_out = self.pipeline.createXLinkOut()
        age_gender_out.setStreamName("age_gender_out")

        face_detector_in.out.link(face_detector.input)
        face_detector.out.link(face_detector_out.input)
        age_gender_in.out.link(age_gender_nn.input)
        age_gender_nn.out.link(age_gender_out.input)
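
Both stages here are fed from the host: a frame goes to face_detector_in, the detections are decoded, and each face crop is then pushed to age_gender_in. A rough sketch of that round trip follows; the 300x300 and 62x62 input sizes (the usual face-detection-retail-0004 / age-gender-recognition-retail-0013 inputs), the to_planar helper, and the `app` instance name are assumptions:

import cv2
import depthai as dai

def to_planar(img, shape):
    # resize, then flatten HWC BGR into CHW planar bytes
    return cv2.resize(img, shape).transpose(2, 0, 1).flatten()

def make_frame(img, w, h):
    frame = dai.ImgFrame()
    frame.setType(dai.ImgFrame.Type.BGR888p)
    frame.setWidth(w)
    frame.setHeight(h)
    frame.setData(to_planar(img, (w, h)))
    return frame

# `app` is an instance of the class above (hypothetical name)
app.create_pipeline(model_blob)
with dai.Device(app.pipeline) as device:
    q_det_in = device.getInputQueue("face_detector_in")
    q_det_out = device.getOutputQueue("face_detector_out")
    q_ag_in = device.getInputQueue("age_gender_in")
    q_ag_out = device.getOutputQueue("age_gender_out")

    img = cv2.imread("face.jpg")  # placeholder input image
    q_det_in.send(make_frame(img, 300, 300))
    det_data = q_det_out.get()  # raw NNData; parse boxes, crop the face, then e.g.:
    # q_ag_in.send(make_frame(face_crop, 62, 62))
    # age, gender = decode(q_ag_out.get())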
Code Example #3
def create_rgb_cam_pipeline():
    print("Creating pipeline: RGB CAM -> XLINK OUT")
    pipeline = dai.Pipeline()

    cam = pipeline.createColorCamera()
    control_in = pipeline.createXLinkIn()
    control_in.setStreamName('control_r')
    xout_preview = pipeline.createXLinkOut()
    xout_video = pipeline.createXLinkOut()

    cam.setPreviewSize(540, 540)
    cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
    cam.setInterleaved(False)
    cam.setBoardSocket(dai.CameraBoardSocket.RGB)

    xout_preview.setStreamName('rgb_preview')
    xout_video.setStreamName('rgb_video')

    control_in.out.link(cam.inputControl)
    cam.preview.link(xout_preview.input)
    cam.video.link(xout_video.input)

    streams = ['rgb_preview', 'rgb_video']

    return pipeline, streams
Code Example #4
def create_pipeline():
    # Start defining a pipeline
    pipeline = dai.Pipeline()

    # Define a source - color camera
    camRgb = pipeline.createColorCamera()
    camRgb.setPreviewSize(300, 300)
    camRgb.setInterleaved(False)
    camRgb.setFps(40)

    # Define a neural network that will make predictions based on the source frames
    nn = pipeline.createMobileNetDetectionNetwork()
    nn.setConfidenceThreshold(0.5)
    nn.setBlobPath(str(Path("models/model.blob").resolve().absolute()))
    nn.setNumInferenceThreads(2)
    # nn.input.setBlocking(False)

    if args.camera:
        camRgb.preview.link(nn.input)
    else:
        detection_in = pipeline.createXLinkIn()
        detection_in.setStreamName("detection_in")
        detection_in.out.link(nn.input)

    # Create outputs
    xoutRgb = pipeline.createXLinkOut()
    xoutRgb.setStreamName("rgb")
    camRgb.preview.link(xoutRgb.input)

    nnOut = pipeline.createXLinkOut()
    nnOut.setStreamName("nn")
    nn.out.link(nnOut.input)

    return pipeline
Code Example #5
def photo(name):
    pipeline = depthai.Pipeline()
    cam = pipeline.createColorCamera()
    cam.setPreviewSize(300, 300)
    cam.setResolution(
        depthai.ColorCameraProperties.SensorResolution.THE_1080_P)
    cam.setInterleaved(False)
    cam.setCamId(0)
    cam_xout = pipeline.createXLinkOut()
    cam_xout.setStreamName("cam_out")
    cam.preview.link(cam_xout.input)
    device = depthai.Device()
    device.startPipeline(pipeline)
    cam_out = device.getOutputQueue("cam_out", 1, True)
    mkdir(name)
    count = 0
    while True:
        frame = np.array(cam_out.get().getData()).reshape(
            (3, 300, 300)).transpose(1, 2, 0).astype(np.uint8)
        cv2.imshow("capture", frame)
        k = cv2.waitKey(1) & 0xFF
        if k == ord('q'):
            break
        if k == ord('s'):
            cv2.imwrite(f"./images/{name}/{name}_{count}.jpg", frame)
            print("成功保存一张图片")
            count += 1
    cv2.destroyAllWindows()
Code Example #6
File: main.py Project: sandhyacs/depthai-experiments
def create_spi_demo_pipeline():
    print("Creating SPI pipeline: ")
    print("COLOR CAM -> ENCODER -> SPI OUT")
    pipeline = dai.Pipeline()

    cam_color = pipeline.createColorCamera()
    spiout_preview = pipeline.createSPIOut()
    videnc = pipeline.createVideoEncoder()

    # set up color camera (its video output feeds the encoder below)
    cam_color.setPreviewSize(300, 300)
    cam_color.setResolution(
        dai.ColorCameraProperties.SensorResolution.THE_1080_P)
    cam_color.setInterleaved(False)
    cam_color.setCamId(0)
    cam_color.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)

    # VideoEncoder
    videnc.setDefaultProfilePreset(1920, 1080, 30,
                                   dai.VideoEncoderProperties.Profile.MJPEG)

    # Link plugins CAM -> ENCODER -> SPI OUT
    cam_color.video.link(videnc.input)
    spiout_preview.setStreamName("spipreview")
    spiout_preview.setBusId(0)
    videnc.bitstream.link(spiout_preview.input)

    return pipeline
Code Example #7
def create_pipeline_ssd():
    global useOAKDCam
    print("Creating pipeline...")
    pipeline = depthai.Pipeline()

    if useOAKDCam:
        # ColorCamera
        print("Creating Color Camera...")
        cam = pipeline.createColorCamera()
        cam.setPreviewSize(544, 320)
        cam.setResolution(
            depthai.ColorCameraProperties.SensorResolution.THE_1080_P)
        cam.setInterleaved(False)
        cam.setBoardSocket(depthai.CameraBoardSocket.RGB)
        cam_xout = pipeline.createXLinkOut()
        cam_xout.setStreamName("cam_out")
        cam.preview.link(cam_xout.input)

    # NeuralNetwork
    print("Creating Person Detection Neural Network...")
    detection_in = pipeline.createXLinkIn()
    detection_in.setStreamName("detection_in")
    detection_nn = pipeline.createNeuralNetwork()
    detection_nn.setBlobPath(
        str(
            Path(
                "../OSSDC-VisionAI-Datasets/pretrained/oakd-mobile-ssd/mobilenet.blob"
            ).resolve().absolute()))
    detection_nn_xout = pipeline.createXLinkOut()
    detection_nn_xout.setStreamName("detection_nn")
    detection_in.out.link(detection_nn.input)
    detection_nn.out.link(detection_nn_xout.input)

    print("Pipeline created.")
    return pipeline
Code Example #8
File: RunTool.py Project: luxonis/depthai-gui
    def do(self):
        try:
            import depthai
            pipeline = depthai.Pipeline()
            rootGraph = self.pyFlowInstance.graphManager.get().findRootGraph()
            device_nodes = list(
                filter(lambda node: isinstance(node, DeviceNode),
                       rootGraph.getNodesList()))
            for node in device_nodes:
                node.build_pipeline(pipeline)
            for node in device_nodes:
                node.build_connections()

            self.found, self.device_info = depthai.XLinkConnection.getFirstDevice(
                depthai.XLinkDeviceState.X_LINK_UNBOOTED)
            if not self.found:
                raise RuntimeError("Device not found")
            self.device = depthai.Device(pipeline, self.device_info, True)
            self.device.startPipeline()

            self.host_nodes = list(
                filter(lambda node: isinstance(node, HostNode),
                       rootGraph.getNodesList()))
            for node in self.host_nodes:
                node.run_node(self.device)

        except Exception as e:
            traceback.print_exc()
            QMessageBox.warning(self.pyFlowInstance, "Warning", str(e))
Code Example #9
def getPipeline():
    # Start defining a pipeline
    pipeline = dai.Pipeline()

    # Define a source - color camera
    cam_rgb = pipeline.createColorCamera()
    # For the demo, just set a larger RGB preview size for OAK-D
    cam_rgb.setPreviewSize(300, 300)
    cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB)
    cam_rgb.setResolution(
        dai.ColorCameraProperties.SensorResolution.THE_1080_P)
    cam_rgb.setInterleaved(False)

    detector = pipeline.createMobileNetDetectionNetwork()
    detector.setConfidenceThreshold(0.5)
    detector.setBlobPath(blobconverter.from_zoo(name="mobilenet-ssd",
                                                shaves=6))
    cam_rgb.preview.link(detector.input)

    # Create output
    xout_rgb = pipeline.createXLinkOut()
    xout_rgb.setStreamName("rgb")
    detector.passthrough.link(xout_rgb.input)

    xout_nn = pipeline.createXLinkOut()
    xout_nn.setStreamName("nn")
    detector.out.link(xout_nn.input)

    return pipeline
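
MobileNetDetectionNetwork parses the SSD output on-device, so the host receives ready-made ImgDetections with normalized coordinates. A minimal display loop for the two streams above (a sketch, assuming opencv-python and the blobconverter import used by the snippet are available):

import cv2
import depthai as dai

pipeline = getPipeline()
with dai.Device(pipeline) as device:
    q_rgb = device.getOutputQueue("rgb", maxSize=4, blocking=False)
    q_nn = device.getOutputQueue("nn", maxSize=4, blocking=False)
    while True:
        frame = q_rgb.get().getCvFrame()
        for det in q_nn.get().detections:
            # coordinates are normalized to [0, 1]
            x1 = int(det.xmin * frame.shape[1])
            y1 = int(det.ymin * frame.shape[0])
            x2 = int(det.xmax * frame.shape[1])
            y2 = int(det.ymax * frame.shape[0])
            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.imshow("rgb", frame)
        if cv2.waitKey(1) == ord('q'):
            break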
Code Example #10
def create_pipeline(camera):
    print("Creating pipeline...")
    pipeline = depthai.Pipeline()
    if camera:
        print("Creating Color Camera...")
        cam = pipeline.createColorCamera()
        cam.setPreviewSize(300, 300)
        cam.setResolution(
            depthai.ColorCameraProperties.SensorResolution.THE_1080_P)
        cam.setInterleaved(False)
        cam.setBoardSocket(depthai.CameraBoardSocket.RGB)
        cam_xout = pipeline.createXLinkOut()
        cam_xout.setStreamName("cam_out")
        cam.preview.link(cam_xout.input)
        first_model(
            pipeline, cam,
            "models/face-detection-retail-0004_openvino_2020_1_4shave.blob",
            "face")
    else:
        models(
            pipeline,
            "models/face-detection-retail-0004_openvino_2020_1_4shave.blob",
            "face")
    models(pipeline,
           "models/face_landmark_160x160_openvino_2020_1_4shave.blob",
           "land68")
    return pipeline
Code Example #11
File: main.py Project: sandhyacs/depthai-experiments
def create_spi_demo_pipeline(nnPath):
    print("Creating SPI pipeline: ")
    print("COLOR CAM -> DetectionNetwork -> SPI OUT")

    pipeline = dai.Pipeline()

    # set up NN node
    nn1 = pipeline.createNeuralNetwork()
    nn1.setBlobPath(nnPath)

    # set up color camera and link to NN node
    colorCam = pipeline.createColorCamera()
    colorCam.setPreviewSize(300, 300)
    colorCam.setResolution(
        dai.ColorCameraProperties.SensorResolution.THE_1080_P)
    colorCam.setInterleaved(False)
    colorCam.setCamId(0)
    colorCam.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
    colorCam.preview.link(nn1.input)

    # set up SPI out node and link to nn1
    spiOut = pipeline.createSPIOut()
    spiOut.setStreamName("spimetaout")
    spiOut.setBusId(0)
    nn1.out.link(spiOut.input)

    return pipeline
Code Example #12
def create_pipeline():
    print("Creating pipeline...")
    pipeline = depthai.Pipeline()

    # NeuralNetwork
    print("Creating Person Detection Neural Network...")
    detection_in = pipeline.createXLinkIn()
    detection_in.setStreamName("detection_in")
    detection_nn = pipeline.createNeuralNetwork()
    detection_nn.setBlobPath(
        str(Path("models/person-detection-retail-0013.blob").resolve().absolute()))
    detection_nn_xout = pipeline.createXLinkOut()
    detection_nn_xout.setStreamName("detection_nn")
    detection_in.out.link(detection_nn.input)
    detection_nn.out.link(detection_nn_xout.input)

    # NeuralNetwork
    print("Creating Person Reidentification Neural Network...")
    reid_in = pipeline.createXLinkIn()
    reid_in.setStreamName("reid_in")
    reid_nn = pipeline.createNeuralNetwork()
    reid_nn.setBlobPath(
        str(Path("models/person-reidentification-retail-0031.blob").resolve().absolute()))
    reid_nn_xout = pipeline.createXLinkOut()
    reid_nn_xout.setStreamName("reid_nn")
    reid_in.out.link(reid_nn.input)
    reid_nn.out.link(reid_nn_xout.input)

    print("Pipeline created.")
    return pipeline
Code Example #13
def create_pipeline(video, model_name, model_w, model_h):
    """
    Create a pipeline that uses neural networks to detect objects in an image

    :param video: True if you want to use a video file, False if you want to use the camera
    :param model_name: The name of the model to use
    :param model_w: The width of the model's input image
    :param model_h: The height of the model's input image
    :return: The pipeline object.
    """
    print("Creating pipeline...")
    pipeline = dai.Pipeline()

    # NeuralNetwork
    yoloDet = pipeline.createNeuralNetwork()
    if Path(model_name).suffix == ".blob":  # Path.suffix includes the leading dot
        yoloDet.setBlobPath(model_name)
    else:
        yoloDet.setBlobPath(
            blobconverter.from_onnx(
                model=MODELS.get(model_name, model_name),
                optimizer_params=[
                    "--scale_values=[58.395, 57.12 , 57.375]",
                    "--mean_values=[123.675, 116.28 , 103.53]",
                ],
                shaves=shaves,
            )
        )

    yolox_det_nn_xout = pipeline.createXLinkOut()
    yolox_det_nn_xout.setStreamName("yolox_det_nn")
    yoloDet.out.link(yolox_det_nn_xout.input)

    if video:
        yolox_det_in = pipeline.createXLinkIn()
        yolox_det_in.setStreamName("yolox_det_in")
        yolox_det_in.setMaxDataSize(model_w * model_h * 3)
        yolox_det_in.out.link(yoloDet.input)

    else:
        # ColorCamera
        print("Creating Color Camera...")
        cam = pipeline.createColorCamera()
        cam.setPreviewSize(preview_size)
        cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
        cam.setInterleaved(False)
        cam.setBoardSocket(dai.CameraBoardSocket.RGB)

        cam_xout = pipeline.createXLinkOut()
        cam_xout.setStreamName("cam_out")
        cam.preview.link(cam_xout.input)

        manip = pipeline.createImageManip()
        manip.setMaxOutputFrameSize(model_w * model_h * 3)
        manip.initialConfig.setResizeThumbnail(model_w, model_h, 114, 114, 114)
        manip.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p)
        cam.preview.link(manip.inputImage)
        manip.out.link(yoloDet.input)
        # cam.preview.link(yoloDet.input)
    return pipeline
Code Example #14
    def create_pipeline(self, model_blob):
        self.pipeline = dai.Pipeline()

        first_stage_in = self.pipeline.createXLinkIn()
        first_stage_in.setStreamName("first_stage_in")

        first_stage_nn = self.pipeline.createNeuralNetwork()
        first_stage_nn.setBlobPath(model_blob["first_stage_nn"])

        first_stage_out = self.pipeline.createXLinkOut()
        first_stage_out.setStreamName("first_stage_out")

        second_stage_in = self.pipeline.createXLinkIn()
        second_stage_in.setStreamName("second_stage_in")

        second_stage_nn = self.pipeline.createNeuralNetwork()
        second_stage_nn.setBlobPath(model_blob["second_stage_nn"])

        second_stage_out = self.pipeline.createXLinkOut()
        second_stage_out.setStreamName("second_stage_out")

        first_stage_in.out.link(first_stage_nn.input)
        first_stage_nn.out.link(first_stage_out.input)
        second_stage_in.out.link(second_stage_nn.input)
        second_stage_nn.out.link(second_stage_out.input)
Code Example #15
File: predict.py Project: daryllstrauss/drvr
def createPipeline(cam=False):
    pipeline = depthai.Pipeline()

    nn = pipeline.createNeuralNetwork()
    nn.setBlobPath("segmentation.blob")

    rgb_out = pipeline.createXLinkOut()
    rgb_out.setStreamName("rgb_out")

    nn_out = pipeline.createXLinkOut()
    nn_out.setStreamName("nn_out")
    nn.out.link(nn_out.input)

    if cam:
        camRgb = pipeline.createColorCamera()
        camRgb.setPreviewSize(224, 224)
        camRgb.setInterleaved(False)
        camRgb.setFps(30)
        camRgb.preview.link(nn.input)
        camRgb.preview.link(rgb_out.input)
    else:
        image_in = pipeline.createXLinkIn()
        image_in.setStreamName("image_in")
        image_in.out.link(nn.input)

    return pipeline
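
With cam=False, frames have to be pushed from the host into image_in as dai.ImgFrame messages. A sketch, assuming the segmentation network expects 224x224 planar BGR input ("input.jpg" is a placeholder path):

import cv2
import depthai as dai

pipeline = createPipeline(cam=False)
with dai.Device(pipeline) as device:
    q_in = device.getInputQueue("image_in")
    q_nn = device.getOutputQueue("nn_out")

    img = cv2.resize(cv2.imread("input.jpg"), (224, 224))
    frame = dai.ImgFrame()
    frame.setType(dai.ImgFrame.Type.BGR888p)
    frame.setWidth(224)
    frame.setHeight(224)
    frame.setData(img.transpose(2, 0, 1).flatten())  # HWC -> CHW planar
    q_in.send(frame)

    seg = q_nn.get()  # NNData holding the segmentation output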
Code Example #16
    def __init__(self):
        self._q_left = None
        self._q_right = None
        self._q_rgb = None
        self._pipeline = dai.Pipeline()
        self._init_pipeline()

        self._is_started = False
Code Example #17
def create_pipeline(video):
    pipeline = dai.Pipeline()
    # pipeline.setOpenVINOVersion(dai.OpenVINO.VERSION_2021_4)

    mesh = pipeline.create(dai.node.NeuralNetwork)
    mesh.setBlobPath("models/face_landmark_openvino_2021.4_6shave.blob")
    mesh.setNumInferenceThreads(2)
    mesh.input.setBlocking(False)
    mesh.input.setQueueSize(2)

    if video:
        mesh_in = pipeline.create(dai.node.XLinkIn)
        mesh_in.setStreamName("mesh_in")
        mesh_in.setMaxDataSize(192 * 192 * 3)
        mesh_in.out.link(mesh.input)
    else:
        cam = pipeline.create(dai.node.ColorCamera)
        cam.setResolution(
            dai.ColorCameraProperties.SensorResolution.THE_1080_P)
        cam.setBoardSocket(dai.CameraBoardSocket.RGB)
        cam.setInterleaved(False)
        cam.setPreviewSize(preview_size)

        cam_out = pipeline.create(dai.node.XLinkOut)
        cam_out.setStreamName("rgb")
        cam.preview.link(cam_out.input)
        manip = pipeline.create(dai.node.ImageManip)
        manip.initialConfig.setResize(192, 192)
        cam.preview.link(manip.inputImage)
        manip.out.link(mesh.input)
        # cam.preview.link(face_det.input)

    mesh_out = pipeline.create(dai.node.XLinkOut)
    mesh_out.setStreamName("mesh_nn")
    mesh.out.link(mesh_out.input)

    eye = pipeline.create(dai.node.NeuralNetwork)
    eye.setBlobPath(
        blobconverter.from_zoo(
            "open-closed-eye-0001",
            shaves=6,
            # version=pipeline.getOpenVINOVersion()
        ))
    eye.setNumInferenceThreads(2)
    eye.input.setBlocking(False)
    eye.input.setQueueSize(2)

    eye_in = pipeline.create(dai.node.XLinkIn)
    eye_in.setStreamName("eye_in")
    eye_in.setMaxDataSize(32 * 32 * 3)
    eye_in.out.link(eye.input)

    eye_out = pipeline.create(dai.node.XLinkOut)
    eye_out.setStreamName("eye_nn")
    eye.out.link(eye_out.input)
    return pipeline
Code Example #18
    def create_pipeline(self, model_name):
        log.info("Creating DepthAI pipeline...")

        pipeline = dai.Pipeline()
        #pipeline.setOpenVINOVersion(dai.OpenVINO.Version.VERSION_2021_2)

        # Define sources and outputs
        camRgb = pipeline.createColorCamera()
        spatialDetectionNetwork = pipeline.createMobileNetSpatialDetectionNetwork()
        monoLeft = pipeline.createMonoCamera()
        monoRight = pipeline.createMonoCamera()
        stereo = pipeline.createStereoDepth()

        xoutRgb = pipeline.createXLinkOut()
        camRgb.preview.link(xoutRgb.input)
        xoutNN = pipeline.createXLinkOut()

        xoutRgb.setStreamName("rgb")
        xoutNN.setStreamName("detections")

        # Properties
        camRgb.setPreviewSize(544, 320)
        camRgb.setResolution(
            dai.ColorCameraProperties.SensorResolution.THE_1080_P)
        camRgb.setInterleaved(False)
        camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)

        monoLeft.setResolution(
            dai.MonoCameraProperties.SensorResolution.THE_400_P)
        monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
        monoRight.setResolution(
            dai.MonoCameraProperties.SensorResolution.THE_400_P)
        monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)

        # Setting node configs
        stereo.setConfidenceThreshold(255)

        spatialDetectionNetwork.setBlobPath(
            blobconverter.from_zoo(name=model_name, shaves=6))
        spatialDetectionNetwork.setConfidenceThreshold(0.5)
        spatialDetectionNetwork.input.setBlocking(False)
        spatialDetectionNetwork.setBoundingBoxScaleFactor(0.5)
        spatialDetectionNetwork.setDepthLowerThreshold(100)
        spatialDetectionNetwork.setDepthUpperThreshold(5000)

        # Linking
        monoLeft.out.link(stereo.left)
        monoRight.out.link(stereo.right)

        camRgb.preview.link(spatialDetectionNetwork.input)

        spatialDetectionNetwork.out.link(xoutNN.input)
        stereo.depth.link(spatialDetectionNetwork.inputDepth)
        log.info("Pipeline created.")
        return pipeline
Code Example #19
    def create_pipeline(self, options):
        self.pipeline = dai.Pipeline()

        self.monoLeft = self.pipeline.createMonoCamera()
        self.monoRight = self.pipeline.createMonoCamera()
        self.depth = self.pipeline.createStereoDepth()
        self.xoutDepth = self.pipeline.createXLinkOut()
        self.xoutDepth.setStreamName("disparity")

        # Properties
        if options.mono_camera_resolution == 'THE_400_P':
            self.monoLeft.setResolution(
                dai.MonoCameraProperties.SensorResolution.THE_400_P)
            self.monoRight.setResolution(
                dai.MonoCameraProperties.SensorResolution.THE_400_P)
            self.frame = np.zeros((400, 640, 3), np.uint8)
        elif options.mono_camera_resolution == 'THE_720_P':
            self.monoLeft.setResolution(
                dai.MonoCameraProperties.SensorResolution.THE_720_P)
            self.monoRight.setResolution(
                dai.MonoCameraProperties.SensorResolution.THE_720_P)
            self.frame = np.zeros((720, 1280, 3), np.uint8)
        elif options.mono_camera_resolution == 'THE_800_P':
            self.monoLeft.setResolution(
                dai.MonoCameraProperties.SensorResolution.THE_800_P)
            self.monoRight.setResolution(
                dai.MonoCameraProperties.SensorResolution.THE_800_P)
            self.frame = np.zeros((800, 1280, 3), np.uint8)
        self.monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
        self.monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)

        # Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way)
        self.depth.setConfidenceThreshold(200)
        # Options: MEDIAN_OFF, KERNEL_3x3, KERNEL_5x5, KERNEL_7x7 (default)
        if options.median_filter == 'MEDIAN_OFF':
            self.depth.setMedianFilter(
                dai.StereoDepthProperties.MedianFilter.MEDIAN_OFF)
        elif options.median_filter == 'KERNEL_3x3':
            self.depth.setMedianFilter(
                dai.StereoDepthProperties.MedianFilter.KERNEL_3x3)
        elif options.median_filter == 'KERNEL_5x5':
            self.depth.setMedianFilter(
                dai.StereoDepthProperties.MedianFilter.KERNEL_5x5)
        elif options.median_filter == 'KERNEL_7x7':
            self.depth.setMedianFilter(
                dai.StereoDepthProperties.MedianFilter.KERNEL_7x7)
        self.depth.setExtendedDisparity(options.extended_disparity)
        self.depth.setSubpixel(options.subpixel)

        # Linking
        self.monoLeft.out.link(self.depth.left)
        self.monoRight.out.link(self.depth.right)
        self.depth.disparity.link(self.xoutDepth.input)

        return self.pipeline
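
The disparity stream can be colorized on the host for display. A sketch, assuming the default 95-level disparity range (the range grows when extended disparity or subpixel mode is enabled); `camera` and `options` are hypothetical names for an instance of the class above and its parsed options:

import cv2
import numpy as np
import depthai as dai

pipeline = camera.create_pipeline(options)
with dai.Device(pipeline) as device:
    q_disp = device.getOutputQueue("disparity", maxSize=4, blocking=False)
    max_disparity = 95  # assumption: extended disparity and subpixel are off
    while True:
        disp = q_disp.get().getFrame()
        disp = (disp * (255.0 / max_disparity)).astype(np.uint8)
        cv2.imshow("disparity", cv2.applyColorMap(disp, cv2.COLORMAP_JET))
        if cv2.waitKey(1) == ord('q'):
            break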
Code Example #20
def create_stereo_depth_pipeline():
    print("Creating Stereo Depth pipeline: ", end='')

    print("XLINK IN -> STEREO -> XLINK OUT")
    pipeline = dai.Pipeline()

    camLeft = pipeline.createXLinkIn()
    camRight = pipeline.createXLinkIn()
    stereo = pipeline.createStereoDepth()
    xoutLeft = pipeline.createXLinkOut()
    xoutRight = pipeline.createXLinkOut()
    xoutDepth = pipeline.createXLinkOut()
    xoutDisparity = pipeline.createXLinkOut()
    xoutRectifLeft = pipeline.createXLinkOut()
    xoutRectifRight = pipeline.createXLinkOut()

    camLeft.setStreamName('in_left')
    camRight.setStreamName('in_right')

    stereo.setOutputDepth(out_depth)
    stereo.setOutputRectified(out_rectified)
    stereo.setConfidenceThreshold(200)
    stereo.setRectifyEdgeFillColor(0)  # Black, to better see the cutout
    stereo.setMedianFilter(median)  # KERNEL_7x7 default
    stereo.setLeftRightCheck(lrcheck)
    stereo.setExtendedDisparity(extended)
    stereo.setSubpixel(subpixel)

    stereo.setEmptyCalibration()  # Set if the input frames are already rectified
    stereo.setInputResolution(1280, 720)

    xoutLeft.setStreamName('left')
    xoutRight.setStreamName('right')
    xoutDepth.setStreamName('depth')
    xoutDisparity.setStreamName('disparity')
    xoutRectifLeft.setStreamName('rectified_left')
    xoutRectifRight.setStreamName('rectified_right')

    camLeft.out.link(stereo.left)
    camRight.out.link(stereo.right)
    stereo.syncedLeft.link(xoutLeft.input)
    stereo.syncedRight.link(xoutRight.input)
    stereo.depth.link(xoutDepth.input)
    stereo.disparity.link(xoutDisparity.input)
    stereo.rectifiedLeft.link(xoutRectifLeft.input)
    stereo.rectifiedRight.link(xoutRectifRight.input)

    streams = ['left', 'right']
    if out_rectified:
        streams.extend(['rectified_left', 'rectified_right'])
    streams.extend(['disparity', 'depth'])

    return pipeline, streams
Code Example #21
def create_pipeline_age_gen():
    global useOAKDCam
    print("Creating pipeline...")
    pipeline = depthai.Pipeline()

    if useOAKDCam:
        # ColorCamera
        print("Creating Color Camera...")
        cam = pipeline.createColorCamera()
        cam.setPreviewSize(300, 300)
        cam.setResolution(
            depthai.ColorCameraProperties.SensorResolution.THE_1080_P)
        cam.setInterleaved(False)
        cam.setBoardSocket(depthai.CameraBoardSocket.RGB)
        cam_xout = pipeline.createXLinkOut()
        cam_xout.setStreamName("cam_out")
        cam.preview.link(cam_xout.input)

    # NeuralNetwork
    print("Creating Face Detection Neural Network...")
    detection_nn = pipeline.createNeuralNetwork()
    detection_nn.setBlobPath(
        str(
            Path(
                "../depthai-experiments/gen2-age-gender/models/face-detection-retail-0004.blob"
            ).resolve().absolute()))
    detection_nn_xout = pipeline.createXLinkOut()
    detection_nn_xout.setStreamName("detection_nn")
    detection_nn.out.link(detection_nn_xout.input)

    if useOAKDCam:
        cam.preview.link(detection_nn.input)
    else:
        detection_in = pipeline.createXLinkIn()
        detection_in.setStreamName("detection_in")
        detection_in.out.link(detection_nn.input)

    # NeuralNetwork
    print("Creating Age Gender Neural Network...")
    age_gender_in = pipeline.createXLinkIn()
    age_gender_in.setStreamName("age_gender_in")
    age_gender_nn = pipeline.createNeuralNetwork()
    age_gender_nn.setBlobPath(
        str(
            Path(
                "../depthai-experiments/gen2-age-gender/models/age-gender-recognition-retail-0013.blob"
            ).resolve().absolute()))
    age_gender_nn_xout = pipeline.createXLinkOut()
    age_gender_nn_xout.setStreamName("age_gender_nn")
    age_gender_in.out.link(age_gender_nn.input)
    age_gender_nn.out.link(age_gender_nn_xout.input)

    print("Pipeline created.")
    return pipeline
Code Example #22
File: main.py Project: pwolf15/depthai-experiments
def create_pipeline():
    print("Creating pipeline...")
    pipeline = dai.Pipeline()

    if args.camera:
        # ColorCamera
        print("Creating Color Camera...")
        cam = pipeline.createColorCamera()
        cam.setPreviewSize(456, 256)
        cam.setResolution(
            dai.ColorCameraProperties.SensorResolution.THE_1080_P)
        cam.setInterleaved(False)
        cam.setBoardSocket(dai.CameraBoardSocket.RGB)
        cam_xout = pipeline.createXLinkOut()
        cam_xout.setStreamName("cam_out")
        cam.preview.link(cam_xout.input)
        controlIn = pipeline.createXLinkIn()
        controlIn.setStreamName('control')
        controlIn.out.link(cam.inputControl)

    # NeuralNetwork
    print("Creating Human Pose Estimation Neural Network...")
    pose_nn = pipeline.createNeuralNetwork()
    if args.camera:
        pose_nn.setBlobPath(
            str(
                Path(
                    "models/human-pose-estimation-0001_openvino_2021.2_6shave.blob"
                ).resolve().absolute()))
    else:
        pose_nn.setBlobPath(
            str(
                Path(
                    "models/human-pose-estimation-0001_openvino_2021.2_8shave.blob"
                ).resolve().absolute()))
    # Increase threads for detection
    pose_nn.setNumInferenceThreads(2)
    # Specify that network takes latest arriving frame in non-blocking manner
    pose_nn.input.setQueueSize(1)
    pose_nn.input.setBlocking(False)
    pose_nn_xout = pipeline.createXLinkOut()
    pose_nn_xout.setStreamName("pose_nn")
    pose_nn.out.link(pose_nn_xout.input)

    if args.camera:
        cam.preview.link(pose_nn.input)
    else:
        pose_in = pipeline.createXLinkIn()
        pose_in.setStreamName("pose_in")
        pose_in.out.link(pose_nn.input)

    print("Pipeline created.")
    return pipeline
Code Example #23
File: main.py Project: luxonis/depthai-experiments
def create_pipeline():
    print("Creating pipeline...")
    pipeline = depthai.Pipeline()

    if args.camera:
        # ColorCamera
        print("Creating Color Camera...")
        cam = pipeline.createColorCamera()
        cam.setPreviewSize(300, 300)
        cam.setResolution(
            depthai.ColorCameraProperties.SensorResolution.THE_1080_P)
        cam.setInterleaved(False)
        cam.setBoardSocket(depthai.CameraBoardSocket.RGB)
        cam_xout = pipeline.createXLinkOut()
        cam_xout.setStreamName("cam_out")
        cam.preview.link(cam_xout.input)

    # NeuralNetwork
    print("Creating Face Detection Neural Network...")
    detection_nn = pipeline.createMobileNetDetectionNetwork()
    detection_nn.setConfidenceThreshold(0.5)
    detection_nn.setBlobPath(
        str(
            blobconverter.from_zoo(name="face-detection-retail-0004",
                                   shaves=6 if args.camera else 8)))
    detection_nn_xout = pipeline.createXLinkOut()
    detection_nn_xout.setStreamName("detection_nn")
    detection_nn.out.link(detection_nn_xout.input)

    if args.camera:
        cam.preview.link(detection_nn.input)
    else:
        detection_in = pipeline.createXLinkIn()
        detection_in.setStreamName("detection_in")
        detection_in.out.link(detection_nn.input)

    # NeuralNetwork
    print("Creating Age Gender Neural Network...")
    age_gender_in = pipeline.createXLinkIn()
    age_gender_in.setStreamName("age_gender_in")
    age_gender_nn = pipeline.createNeuralNetwork()
    age_gender_nn.setBlobPath(
        str(
            blobconverter.from_zoo(name="age-gender-recognition-retail-0013",
                                   shaves=6 if args.camera else 8)))
    age_gender_nn_xout = pipeline.createXLinkOut()
    age_gender_nn_xout.setStreamName("age_gender_nn")
    age_gender_in.out.link(age_gender_nn.input)
    age_gender_nn.out.link(age_gender_nn_xout.input)

    print("Pipeline created.")
    return pipeline
Code Example #24
    def create_pipeline(self, model_blob):
        self.pipeline = dai.Pipeline()

        data_in = self.pipeline.createXLinkIn()
        data_in.setStreamName("data_in")

        self.model_blob = self.pipeline.createNeuralNetwork()
        self.model_blob.setBlobPath(model_blob)
        data_out = self.pipeline.createXLinkOut()
        data_out.setStreamName("data_out")

        data_in.out.link(self.model_blob.input)
        self.model_blob.out.link(data_out.input)
Code Example #25
def create_pipeline(depth_enabled=True):
    pipeline = dai.Pipeline()

    rgb = pipeline.createColorCamera()
    rgb.setPreviewSize(300, 300)
    rgb.setBoardSocket(dai.CameraBoardSocket.RGB)
    rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
    rgb.setInterleaved(False)
    rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)

    controlIn = pipeline.createXLinkIn()
    controlIn.setStreamName('control')
    controlIn.out.link(rgb.inputControl)

    rgbOut = pipeline.createXLinkOut()
    rgbOut.setStreamName("color")
    rgb.preview.link(rgbOut.input)

    if depth_enabled:
        left = pipeline.createMonoCamera()
        left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
        left.setBoardSocket(dai.CameraBoardSocket.LEFT)

        right = pipeline.createMonoCamera()
        right.setResolution(
            dai.MonoCameraProperties.SensorResolution.THE_400_P)
        right.setBoardSocket(dai.CameraBoardSocket.RIGHT)

        depth = pipeline.createStereoDepth()
        depth.initialConfig.setConfidenceThreshold(255)
        median = dai.StereoDepthProperties.MedianFilter.KERNEL_7x7
        depth.initialConfig.setMedianFilter(median)
        depth.setLeftRightCheck(False)
        depth.setExtendedDisparity(False)
        depth.setSubpixel(False)

        left.out.link(depth.left)
        right.out.link(depth.right)

        # Create output
        leftOut = pipeline.createXLinkOut()
        leftOut.setStreamName("left")
        left.out.link(leftOut.input)
        rightOut = pipeline.createXLinkOut()
        rightOut.setStreamName("right")
        right.out.link(rightOut.input)
        depthOut = pipeline.createXLinkOut()
        depthOut.setStreamName("disparity")
        depth.disparity.link(depthOut.input)

    return pipeline
Code Example #26
def create_pipeline():
    print("Creating pipeline...")
    pipeline = dai.Pipeline()
    pipeline.setOpenVINOVersion(version=dai.OpenVINO.Version.VERSION_2021_3)

    # ColorCamera
    print("Creating Color Camera...")
    cam = pipeline.create(dai.node.ColorCamera)
    cam.setPreviewSize(300, 300)
    cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
    cam.setVideoSize(1080, 1080)
    cam.setInterleaved(False)

    cam_xout = pipeline.createXLinkOut()
    cam_xout.setStreamName("frame")
    cam.video.link(cam_xout.input)

    # NeuralNetwork
    print("Creating Face Detection Neural Network...")
    face_det_nn = pipeline.create(dai.node.MobileNetDetectionNetwork)
    face_det_nn.setConfidenceThreshold(0.5)
    face_det_nn.setBlobPath(
        blobconverter.from_zoo(name="face-detection-retail-0004",
                               shaves=6,
                               version='2021.3'))
    # Link Face ImageManip -> Face detection NN node
    cam.preview.link(face_det_nn.input)

    objectTracker = pipeline.createObjectTracker()
    objectTracker.setDetectionLabelsToTrack([1])  # track label 1 (faces for this detector)
    # possible tracking types: ZERO_TERM_COLOR_HISTOGRAM, ZERO_TERM_IMAGELESS, SHORT_TERM_IMAGELESS, SHORT_TERM_KCF
    objectTracker.setTrackerType(dai.TrackerType.ZERO_TERM_COLOR_HISTOGRAM)
    # take the smallest ID when new object is tracked, possible options: SMALLEST_ID, UNIQUE_ID
    objectTracker.setTrackerIdAssigmentPolicy(
        dai.TrackerIdAssigmentPolicy.SMALLEST_ID)

    # Linking
    face_det_nn.passthrough.link(objectTracker.inputDetectionFrame)
    face_det_nn.passthrough.link(objectTracker.inputTrackerFrame)
    face_det_nn.out.link(objectTracker.inputDetections)
    # Send face detections to the host (for bounding boxes)

    pass_xout = pipeline.create(dai.node.XLinkOut)
    pass_xout.setStreamName("pass_out")
    objectTracker.passthroughTrackerFrame.link(pass_xout.input)

    tracklets_xout = pipeline.create(dai.node.XLinkOut)
    tracklets_xout.setStreamName("tracklets")
    objectTracker.out.link(tracklets_xout.input)
    print("Pipeline created.")
    return pipeline
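
On the host, tracklets are read alongside the passthrough frames and drawn by denormalizing each ROI. A sketch of that consumer loop (assuming opencv-python):

import cv2
import depthai as dai

pipeline = create_pipeline()
with dai.Device(pipeline) as device:
    q_pass = device.getOutputQueue("pass_out", maxSize=4, blocking=False)
    q_track = device.getOutputQueue("tracklets", maxSize=4, blocking=False)
    while True:
        frame = q_pass.get().getCvFrame()
        for t in q_track.get().tracklets:
            roi = t.roi.denormalize(frame.shape[1], frame.shape[0])
            tl, br = roi.topLeft(), roi.bottomRight()
            cv2.rectangle(frame, (int(tl.x), int(tl.y)),
                          (int(br.x), int(br.y)), (0, 255, 0), 2)
            cv2.putText(frame, f"ID {t.id} {t.status.name}",
                        (int(tl.x), int(tl.y) - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
        cv2.imshow("tracker", frame)
        if cv2.waitKey(1) == ord('q'):
            break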
Code Example #27
    def create_pipeline(self):
        print("Creating pipeline...")
        pipeline = dai.Pipeline()
        pipeline.setOpenVINOVersion(
            version=dai.OpenVINO.Version.VERSION_2021_2)
        self.pd_input_length = 128

        print("Creating Color Camera...")
        cam = pipeline.createColorCamera()
        cam.setPreviewSize(self.preview_width, self.preview_height)
        cam.setInterleaved(False)
        cam.setBoardSocket(dai.CameraBoardSocket.RGB)
        cam_out = pipeline.createXLinkOut()
        cam_out.setStreamName("cam_out")
        cam.preview.link(cam_out.input)

        print("Creating Palm Detection Neural Network...")
        pd_nn = pipeline.createNeuralNetwork()
        pd_nn.setBlobPath(str(Path(self.pd_path).resolve().absolute()))
        pd_in = pipeline.createXLinkIn()
        pd_in.setStreamName("pd_in")
        pd_in.out.link(pd_nn.input)
        pd_out = pipeline.createXLinkOut()
        pd_out.setStreamName("pd_out")
        pd_nn.out.link(pd_out.input)

        print("Creating Hand Landmark Neural Network...")
        lm_nn = pipeline.createNeuralNetwork()
        lm_nn.setBlobPath(str(Path(self.lm_path).resolve().absolute()))
        self.lm_input_length = 224
        lm_in = pipeline.createXLinkIn()
        lm_in.setStreamName("lm_in")
        lm_in.out.link(lm_nn.input)
        lm_out = pipeline.createXLinkOut()
        lm_out.setStreamName("lm_out")
        lm_nn.out.link(lm_out.input)

        print("Creating Hand ASL Recognition Neural Network...")
        asl_nn = pipeline.createNeuralNetwork()
        asl_nn.setBlobPath(str(Path(self.asl_path).resolve().absolute()))
        self.asl_input_length = 224
        asl_in = pipeline.createXLinkIn()
        asl_in.setStreamName("asl_in")
        asl_in.out.link(asl_nn.input)
        asl_out = pipeline.createXLinkOut()
        asl_out.setStreamName("asl_out")
        asl_nn.out.link(asl_out.input)

        print("Pipeline created.")
        return pipeline
Code Example #28
def create_pipeline_people_reidentification():
    global useOAKDCam
    print("Creating pipeline...")
    pipeline = depthai.Pipeline()

    if useOAKDCam:
        # ColorCamera
        print("Creating Color Camera...")
        cam = pipeline.createColorCamera()
        cam.setPreviewSize(544, 320)
        cam.setResolution(
            depthai.ColorCameraProperties.SensorResolution.THE_1080_P)
        cam.setInterleaved(False)
        cam.setBoardSocket(depthai.CameraBoardSocket.RGB)
        cam_xout = pipeline.createXLinkOut()
        cam_xout.setStreamName("cam_out")
        cam.preview.link(cam_xout.input)

    # NeuralNetwork
    print("Creating Person Detection Neural Network...")
    detection_in = pipeline.createXLinkIn()
    detection_in.setStreamName("detection_in")
    detection_nn = pipeline.createNeuralNetwork()
    detection_nn.setBlobPath(
        str(
            Path(
                "../depthai-experiments/pedestrian-reidentification/models/person-detection-retail-0013.blob"
            ).resolve().absolute()))
    detection_nn_xout = pipeline.createXLinkOut()
    detection_nn_xout.setStreamName("detection_nn")
    detection_in.out.link(detection_nn.input)
    detection_nn.out.link(detection_nn_xout.input)

    # NeuralNetwork
    print("Creating Person Reidentification Neural Network...")
    reid_in = pipeline.createXLinkIn()
    reid_in.setStreamName("reid_in")
    reid_nn = pipeline.createNeuralNetwork()
    reid_nn.setBlobPath(
        str(
            Path(
                "../depthai-experiments/pedestrian-reidentification/models/person-reidentification-retail-0031.blob"
            ).resolve().absolute()))
    reid_nn_xout = pipeline.createXLinkOut()
    reid_nn_xout.setStreamName("reid_nn")
    reid_in.out.link(reid_nn.input)
    reid_nn.out.link(reid_nn_xout.input)

    print("Pipeline created.")
    return pipeline
Code Example #29
File: oak_camera.py Project: shumwaymark/imagenode
    def MobileNetSSD(self):
        NN_SIZE = (300, 300)
        NN_PATH = '/home/pi/depthai/depthai-python/examples/models/mobilenet-ssd_openvino_2021.4_6shave.blob'

        # Create pipeline
        pipeline = dai.Pipeline()

        # Define nodes and outputs
        nn = pipeline.create(dai.node.MobileNetDetectionNetwork)
        cam = pipeline.create(dai.node.ColorCamera)
        encoder = pipeline.create(dai.node.VideoEncoder)

        xoutFrames = pipeline.create(dai.node.XLinkOut)
        xoutJPEG = pipeline.create(dai.node.XLinkOut)
        xoutNN = pipeline.create(dai.node.XLinkOut)

        xoutFrames.setStreamName("frames")
        xoutJPEG.setStreamName("jpegs")
        xoutNN.setStreamName("nn")

        # Properties
        nn.setConfidenceThreshold(0.5)
        nn.setBlobPath(NN_PATH)

        cam.setPreviewSize(NN_SIZE)
        cam.setPreviewKeepAspectRatio(False)
        cam.setInterleaved(False)
        cam.setIspScale(1, 3)
        cam.setFps(30)
        cam.setBoardSocket(dai.CameraBoardSocket.RGB)
        # scale collection down from 4K to just FullHD
        cam.setResolution(
            dai.ColorCameraProperties.SensorResolution.THE_1080_P)
        cam.setVideoSize(640, 360)  # reduce further for storage

        encoder.setDefaultProfilePreset(
            1, dai.VideoEncoderProperties.Profile.MJPEG)

        # Linking
        cam.video.link(encoder.input)
        cam.video.link(xoutFrames.input)
        encoder.bitstream.link(xoutJPEG.input)
        cam.preview.link(nn.input)
        nn.out.link(xoutNN.input)

        # Connect to device and start pipeline
        device = dai.Device(pipeline)
        return device
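
The method above returns an already-connected device, so the caller only has to open the three queues. A sketch of what a consumer might do (`node` is a hypothetical instance of the class above; stream names follow the example):

device = node.MobileNetSSD()
q_frames = device.getOutputQueue("frames", maxSize=4, blocking=False)
q_jpegs = device.getOutputQueue("jpegs", maxSize=30, blocking=True)
q_nn = device.getOutputQueue("nn", maxSize=4, blocking=False)

frame = q_frames.get().getCvFrame()    # 640x360 BGR video frame
jpeg = bytes(q_jpegs.get().getData())  # MJPEG bitstream, ready to write to disk
detections = q_nn.get().detections     # parsed MobileNet detections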
Code Example #30
File: main.py Project: sandhyacs/depthai-experiments
def create_spi_demo_pipeline(nnPath):
    print("Creating SPI pipeline: ")
    print("COLOR CAM -> DetectionNetwork -> SPI OUT")

    pipeline = dai.Pipeline()

    # testing YOLO DetectionNetwork 
    detectionNetwork = pipeline.createYoloDetectionNetwork()
    detectionNetwork.setConfidenceThreshold(0.5)
    detectionNetwork.setBlobPath(nnPath)
    detectionNetwork.setNumClasses(80)
    detectionNetwork.setCoordinateSize(4)
    anchors = np.array([10,14, 23,27, 37,58, 81,82, 135,169, 344,319])
    detectionNetwork.setAnchors(anchors)
    anchorMasks26 = np.array([1,2,3])
    anchorMasks13 = np.array([3,4,5])
    anchorMasks = {
        "side26": anchorMasks26,
        "side13": anchorMasks13,
    }
    detectionNetwork.setAnchorMasks(anchorMasks)
    detectionNetwork.setIouThreshold(0.5)


#    # testing MobileNet DetectionNetwork
#    detectionNetwork = pipeline.createMobileNetDetectionNetwork()
#    detectionNetwork.setConfidenceThreshold(0.5)
#    detectionNetwork.setBlobPath(nnPath)

    # set up color camera and link to NN node
    colorCam = pipeline.createColorCamera()
    colorCam.setPreviewSize(416, 416)
#    colorCam.setPreviewSize(300, 300)
    colorCam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
    colorCam.setInterleaved(False)
    colorCam.setCamId(0)
    colorCam.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
    colorCam.preview.link(detectionNetwork.input)


    # set up SPI out node
    spiOut = pipeline.createSPIOut()
    spiOut.setStreamName("spimetaout")
    spiOut.setBusId(0)
    detectionNetwork.out.link(spiOut.input)

    return pipeline