Example 1
def create_pipeline():
    print("Creating pipeline...")
    pipeline = depthai.Pipeline()

    if args.camera:
        # ColorCamera
        print("Creating Color Camera...")
        cam = pipeline.createColorCamera()
        cam.setPreviewSize(300, 300)
        cam.setResolution(
            depthai.ColorCameraProperties.SensorResolution.THE_1080_P)
        cam.setInterleaved(False)
        cam.setBoardSocket(depthai.CameraBoardSocket.RGB)
        cam_xout = pipeline.createXLinkOut()
        cam_xout.setStreamName("cam_out")
        cam.preview.link(cam_xout.input)

    # NeuralNetwork
    print("Creating Face Detection Neural Network...")
    detection_nn = pipeline.createMobileNetDetectionNetwork()
    detection_nn.setConfidenceThreshold(0.5)
    detection_nn.setBlobPath(
        str(
            blobconverter.from_zoo(name="face-detection-retail-0004",
                                   shaves=6 if args.camera else 8)))
    detection_nn_xout = pipeline.createXLinkOut()
    detection_nn_xout.setStreamName("detection_nn")
    detection_nn.out.link(detection_nn_xout.input)

    if args.camera:
        cam.preview.link(detection_nn.input)
    else:
        detection_in = pipeline.createXLinkIn()
        detection_in.setStreamName("detection_in")
        detection_in.out.link(detection_nn.input)

    # NeuralNetwork
    print("Creating Age Gender Neural Network...")
    age_gender_in = pipeline.createXLinkIn()
    age_gender_in.setStreamName("age_gender_in")
    age_gender_nn = pipeline.createNeuralNetwork()
    age_gender_nn.setBlobPath(
        str(
            blobconverter.from_zoo(name="age-gender-recognition-retail-0013",
                                   shaves=6 if args.camera else 8)))
    age_gender_nn_xout = pipeline.createXLinkOut()
    age_gender_nn_xout.setStreamName("age_gender_nn")
    age_gender_in.out.link(age_gender_nn.input)
    age_gender_nn.out.link(age_gender_nn_xout.input)

    print("Pipeline created.")
    return pipeline
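
A minimal host-side sketch for the pipeline above (assuming the stream names defined in create_pipeline() and the standard depthai Device/queue API; the crop-and-forward step for the age/gender stage is only indicated):

with depthai.Device(create_pipeline()) as device:
    cam_out = device.getOutputQueue("cam_out", maxSize=1, blocking=False)
    detection_nn = device.getOutputQueue("detection_nn")
    age_gender_in = device.getInputQueue("age_gender_in")
    age_gender_nn = device.getOutputQueue("age_gender_nn")
    while True:
        frame = cam_out.get().getCvFrame()
        for det in detection_nn.get().detections:
            # crop the face from `frame` using det.xmin..det.ymax, then
            # wrap it in an ImgFrame and send it through age_gender_in
            pass
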
Example 2
def getPipeline():
    # Start defining a pipeline
    pipeline = dai.Pipeline()

    # Define a source - color camera
    cam_rgb = pipeline.createColorCamera()
    # For the demo, just set a larger RGB preview size for OAK-D
    cam_rgb.setPreviewSize(300, 300)
    cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB)
    cam_rgb.setResolution(
        dai.ColorCameraProperties.SensorResolution.THE_1080_P)
    cam_rgb.setInterleaved(False)

    detector = pipeline.createMobileNetDetectionNetwork()
    detector.setConfidenceThreshold(0.5)
    detector.setBlobPath(blobconverter.from_zoo(name="mobilenet-ssd",
                                                shaves=6))
    cam_rgb.preview.link(detector.input)

    # Create output
    xout_rgb = pipeline.createXLinkOut()
    xout_rgb.setStreamName("rgb")
    detector.passthrough.link(xout_rgb.input)

    xout_nn = pipeline.createXLinkOut()
    xout_nn.setStreamName("nn")
    detector.out.link(xout_nn.input)

    return pipeline
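
Note that the "rgb" stream is fed from detector.passthrough rather than straight from the camera: the passthrough output re-emits the exact frame each inference ran on, so frames and detections arrive in lockstep. A host-side sketch under that assumption (queue names from the example, the rest standard depthai API):

with dai.Device(getPipeline()) as device:
    q_rgb = device.getOutputQueue("rgb", maxSize=4, blocking=False)
    q_nn = device.getOutputQueue("nn", maxSize=4, blocking=False)
    while True:
        frame = q_rgb.get().getCvFrame()
        detections = q_nn.get().detections  # dai.ImgDetections
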
Example 3
def create_pipeline(video):
    pipeline = dai.Pipeline()
    # pipeline.setOpenVINOVersion(dai.OpenVINO.VERSION_2021_4)

    mesh = pipeline.create(dai.node.NeuralNetwork)
    mesh.setBlobPath("models/face_landmark_openvino_2021.4_6shave.blob")
    mesh.setNumInferenceThreads(2)
    mesh.input.setBlocking(False)
    mesh.input.setQueueSize(2)

    if video:
        mesh_in = pipeline.create(dai.node.XLinkIn)
        mesh_in.setStreamName("mesh_in")
        mesh_in.setMaxDataSize(192 * 192 * 3)
        mesh_in.out.link(mesh.input)
    else:
        cam = pipeline.create(dai.node.ColorCamera)
        cam.setResolution(
            dai.ColorCameraProperties.SensorResolution.THE_1080_P)
        cam.setBoardSocket(dai.CameraBoardSocket.RGB)
        cam.setInterleaved(False)
        cam.setPreviewSize(preview_size)

        cam_out = pipeline.create(dai.node.XLinkOut)
        cam_out.setStreamName("rgb")
        cam.preview.link(cam_out.input)
        manip = pipeline.create(dai.node.ImageManip)
        manip.initialConfig.setResize(192, 192)
        cam.preview.link(manip.inputImage)
        manip.out.link(mesh.input)
        # cam.preview.link(face_det.input)

    mesh_out = pipeline.create(dai.node.XLinkOut)
    mesh_out.setStreamName("mesh_nn")
    mesh.out.link(mesh_out.input)

    eye = pipeline.create(dai.node.NeuralNetwork)
    eye.setBlobPath(
        blobconverter.from_zoo(
            "open-closed-eye-0001",
            shaves=6,
            # version=pipeline.getOpenVINOVersion()
        ))
    eye.setNumInferenceThreads(2)
    eye.input.setBlocking(False)
    eye.input.setQueueSize(2)

    eye_in = pipeline.create(dai.node.XLinkIn)
    eye_in.setStreamName("eye_in")
    eye_in.setMaxDataSize(32 * 32 * 3)
    eye_in.out.link(eye.input)

    eye_out = pipeline.create(dai.node.XLinkOut)
    eye_out.setStreamName("eye_nn")
    eye.out.link(eye_out.input)
    return pipeline
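
In video mode the 192x192 crops have to be supplied by the host through the "mesh_in" queue. A sketch of that step (assumptions: device is an open dai.Device and crop is a 192x192 BGR numpy array; the planar layout is what such blobs usually expect):

img = dai.ImgFrame()
img.setType(dai.ImgFrame.Type.BGR888p)
img.setWidth(192)
img.setHeight(192)
img.setData(crop.transpose(2, 0, 1).flatten())  # HWC -> planar CHW
device.getInputQueue("mesh_in").send(img)
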
Example 4
    def create_pipeline(self, model_name):
        log.info("Creating DepthAI pipeline...")

        pipeline = dai.Pipeline()
        #pipeline.setOpenVINOVersion(dai.OpenVINO.Version.VERSION_2021_2)

        # Define sources and outputs
        camRgb = pipeline.createColorCamera()
        spatialDetectionNetwork = pipeline.createMobileNetSpatialDetectionNetwork()
        monoLeft = pipeline.createMonoCamera()
        monoRight = pipeline.createMonoCamera()
        stereo = pipeline.createStereoDepth()

        xoutRgb = pipeline.createXLinkOut()
        camRgb.preview.link(xoutRgb.input)
        xoutNN = pipeline.createXLinkOut()

        xoutRgb.setStreamName("rgb")
        xoutNN.setStreamName("detections")

        # Properties
        camRgb.setPreviewSize(544, 320)
        camRgb.setResolution(
            dai.ColorCameraProperties.SensorResolution.THE_1080_P)
        camRgb.setInterleaved(False)
        camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)

        monoLeft.setResolution(
            dai.MonoCameraProperties.SensorResolution.THE_400_P)
        monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
        monoRight.setResolution(
            dai.MonoCameraProperties.SensorResolution.THE_400_P)
        monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)

        # Setting node configs
        stereo.setConfidenceThreshold(255)

        spatialDetectionNetwork.setBlobPath(
            blobconverter.from_zoo(name=model_name, shaves=6))
        spatialDetectionNetwork.setConfidenceThreshold(0.5)
        spatialDetectionNetwork.input.setBlocking(False)
        spatialDetectionNetwork.setBoundingBoxScaleFactor(0.5)
        spatialDetectionNetwork.setDepthLowerThreshold(100)
        spatialDetectionNetwork.setDepthUpperThreshold(5000)

        # Linking
        monoLeft.out.link(stereo.left)
        monoRight.out.link(stereo.right)

        camRgb.preview.link(spatialDetectionNetwork.input)

        spatialDetectionNetwork.out.link(xoutNN.input)
        stereo.depth.link(spatialDetectionNetwork.inputDepth)
        log.info("Pipeline created.")
        return pipeline
Example 5
def create_pipeline():
    print("Creating pipeline...")
    pipeline = dai.Pipeline()
    pipeline.setOpenVINOVersion(version=dai.OpenVINO.Version.VERSION_2021_3)

    # ColorCamera
    print("Creating Color Camera...")
    cam = pipeline.create(dai.node.ColorCamera)
    cam.setPreviewSize(300, 300)
    cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
    cam.setVideoSize(1080, 1080)
    cam.setInterleaved(False)

    cam_xout = pipeline.createXLinkOut()
    cam_xout.setStreamName("frame")
    cam.video.link(cam_xout.input)

    # NeuralNetwork
    print("Creating Face Detection Neural Network...")
    face_det_nn = pipeline.create(dai.node.MobileNetDetectionNetwork)
    face_det_nn.setConfidenceThreshold(0.5)
    face_det_nn.setBlobPath(
        blobconverter.from_zoo(name="face-detection-retail-0004",
                               shaves=6,
                               version='2021.3'))
    # Link camera preview -> Face detection NN node
    cam.preview.link(face_det_nn.input)

    objectTracker = pipeline.createObjectTracker()
    objectTracker.setDetectionLabelsToTrack([1])  # track only person
    # possible tracking types: ZERO_TERM_COLOR_HISTOGRAM, ZERO_TERM_IMAGELESS, SHORT_TERM_IMAGELESS, SHORT_TERM_KCF
    objectTracker.setTrackerType(dai.TrackerType.ZERO_TERM_COLOR_HISTOGRAM)
    # take the smallest ID when new object is tracked, possible options: SMALLEST_ID, UNIQUE_ID
    objectTracker.setTrackerIdAssigmentPolicy(
        dai.TrackerIdAssigmentPolicy.SMALLEST_ID)

    # Linking
    face_det_nn.passthrough.link(objectTracker.inputDetectionFrame)
    face_det_nn.passthrough.link(objectTracker.inputTrackerFrame)
    face_det_nn.out.link(objectTracker.inputDetections)
    # Send face detections to the host (for bounding boxes)

    pass_xout = pipeline.create(dai.node.XLinkOut)
    pass_xout.setStreamName("pass_out")
    objectTracker.passthroughTrackerFrame.link(pass_xout.input)

    tracklets_xout = pipeline.create(dai.node.XLinkOut)
    tracklets_xout.setStreamName("tracklets")
    objectTracker.out.link(tracklets_xout.input)
    print("Pipeline created.")
    return pipeline
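
A host loop consuming the tracker output could look like this sketch (stream names from the example; the Tracklet fields are the standard depthai ones):

with dai.Device(create_pipeline()) as device:
    q_frame = device.getOutputQueue("pass_out")
    q_track = device.getOutputQueue("tracklets")
    while True:
        frame = q_frame.get().getCvFrame()
        for t in q_track.get().tracklets:
            roi = t.roi.denormalize(frame.shape[1], frame.shape[0])
            print(t.id, t.status.name, int(roi.topLeft().x), int(roi.topLeft().y))
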
Example 6
def create_pipeline():
    print("Creating pipeline...")
    pipeline = depthai.Pipeline()
    if camera:
        print("Creating Color Camera...")
        cam = pipeline.createColorCamera()
        cam.setPreviewSize(300, 300)
        cam.setResolution(
            depthai.ColorCameraProperties.SensorResolution.THE_1080_P)
        cam.setInterleaved(False)
        cam.setBoardSocket(depthai.CameraBoardSocket.RGB)
        cam_xout = pipeline.createXLinkOut()
        cam_xout.setStreamName("cam_out")
        cam.preview.link(cam_xout.input)
        first_models(
            cam,
            pipeline,
            blobconverter.from_zoo("face-detection-retail-0004", shaves=6),
            "face",
        )
    else:
        models(
            pipeline,
            blobconverter.from_zoo("face-detection-retail-0004", shaves=8),
            "face",
        )

    models(
        pipeline,
        blobconverter.from_openvino(
            "models/face_landmark_160x160.xml",
            "models/face_landmark_160x160.bin",
            shaves=shaves,
        ),
        "land68",
    )
    return pipeline
Example 7
    def compile(self, shaves, target='auto'):
        if self.use_zoo:
            return blobconverter.from_zoo(name=self.model_name, shaves=shaves)
        else:
            return blobconverter.compile_blob(
                blob_name=self.model_name,
                req_data={
                    "name": self.model_name,
                    "use_zoo": True,
                },
                req_files={
                    'config': self.config_file,
                },
                data_type="FP16",
                shaves=shaves,
            )
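
A hypothetical call site for this wrapper (the owning class and its model_name/use_zoo/config_file attributes are not shown in the snippet, so the names below are assumed):

model = Model(model_name="mobilenet-ssd", use_zoo=True)  # hypothetical owner class
blob_path = model.compile(shaves=6)
nn.setBlobPath(str(blob_path))  # nn: any DepthAI network node
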
Example 8
    def create_nns(self):
        self.create_mobilenet_nn(
            blobconverter.from_zoo("face-detection-retail-0004",
                                   shaves=shaves),
            "face",
            first=True,
        )
        # https://github.com/sbdcv/sbd_mask/tree/master/model
        # https://github.com/sbdcv/sbd_mask/blob/8e25fbd550339857f6466016d3ed0866e759ab47/deploy.py#L11-L12
        self.create_nn(
            blobconverter.from_onnx(
                (Path(__file__).parent /
                 Path("models/sbd_mask.onnx")).as_posix(),
                optimizer_params=[
                    "--scale_values=[255,255,255]",
                    "--reverse_input_channels",
                ],
                shaves=shaves,
            ),
            "mask",
        )
Example 9
    def __init__(self, application, pc_id, options):
        super().__init__(application, pc_id, options)
        self.frame = np.zeros((self.options.height, self.options.width, 3),
                              np.uint8)
        self.frame[:] = (0, 0, 0)
        self.detections = []
        self.pipeline = dai.Pipeline()
        self.camRgb = self.pipeline.createColorCamera()
        self.xoutRgb = self.pipeline.createXLinkOut()

        self.xoutRgb.setStreamName("rgb")

        # Properties
        self.camRgb.setPreviewSize(self.options.width, self.options.height)
        self.camRgb.setInterleaved(False)
        self.camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)

        # Linking
        self.camRgb.preview.link(self.xoutRgb.input)
        self.nn = None
        if options.nn != "":
            self.nn = self.pipeline.createMobileNetDetectionNetwork()
            self.nn.setConfidenceThreshold(0.5)
            self.nn.setBlobPath(
                str(blobconverter.from_zoo(options.nn, shaves=6)))
            self.nn.setNumInferenceThreads(2)
            self.nn.input.setBlocking(False)
            self.nnOut = self.pipeline.createXLinkOut()
            self.nnOut.setStreamName("nn")
            self.camRgb.preview.link(self.nn.input)
            self.nn.out.link(self.nnOut.input)
        self.device = dai.Device(self.pipeline)
        self.qRgb = self.device.getOutputQueue(name="rgb",
                                               maxSize=1,
                                               blocking=False)
        if self.nn is not None:
            self.qDet = self.device.getOutputQueue(name="nn",
                                                   maxSize=4,
                                                   blocking=False)
        self.device.startPipeline()
Example 10
        for y_col in channel for val in y_col
    ]


# Start defining a pipeline
pipeline = dai.Pipeline()

# Define a source - color camera
cam_rgb = pipeline.createColorCamera()
cam_rgb.setPreviewSize(300, 300)
cam_rgb.setInterleaved(False)

# Define a neural network that will make predictions based on the source frames
detection_nn = pipeline.createNeuralNetwork()
detection_nn.setBlobPath(
    str(blobconverter.from_zoo(name="face-detection-retail-0004", shaves=6)))
cam_rgb.preview.link(detection_nn.input)

landmarks_nn = pipeline.createNeuralNetwork()
landmarks_nn.setBlobPath(
    str(
        blobconverter.from_zoo(name="landmarks-regression-retail-0009",
                               shaves=6)))

# Create input for the landmarks NN
xin_rgb = pipeline.createXLinkIn()
xin_rgb.setStreamName("land_in")
xin_rgb.out.link(landmarks_nn.input)

# Create outputs
xout_frame = pipeline.createXLinkOut()
Example 11
def create_depthai_pipeline():
    # Start defining a pipeline
    pipeline = dai.Pipeline()

    # Define a source - two mono (grayscale) cameras
    left = pipeline.createMonoCamera()
    left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
    left.setBoardSocket(dai.CameraBoardSocket.LEFT)

    right = pipeline.createMonoCamera()
    right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
    right.setBoardSocket(dai.CameraBoardSocket.RIGHT)

    # Create a node that will produce the depth map
    depth = pipeline.createStereoDepth()
    depth.setConfidenceThreshold(200)
    depth.setOutputRectified(True)  # The rectified streams are horizontally mirrored by default
    depth.setRectifyEdgeFillColor(0)  # Black, to better see the cutout
    depth.setExtendedDisparity(True)  # For better close range depth perception

    median = dai.StereoDepthProperties.MedianFilter.KERNEL_7x7  # For depth filtering
    depth.setMedianFilter(median)

    # Linking mono cameras with depth node
    left.out.link(depth.left)
    right.out.link(depth.right)

    # Create left output
    xOutRight = pipeline.createXLinkOut()
    xOutRight.setStreamName("right")
    depth.rectifiedRight.link(xOutRight.input)

    # Create depth output
    xOutDisp = pipeline.createXLinkOut()
    xOutDisp.setStreamName("disparity")
    depth.disparity.link(xOutDisp.input)

    # Create input and output node for Depth Classification
    xDepthIn = pipeline.createXLinkIn()
    xDepthIn.setStreamName("depth_in")
    xOutDepthNn = pipeline.createXLinkOut()
    xOutDepthNn.setStreamName("depth_nn")

    # Define Depth Classification NN node
    depthNn = pipeline.createNeuralNetwork()
    depthNn.setBlobPath(
        "data/depth-classification-models/depth_classification_ipscaled_model.blob"
    )
    depthNn.input.setBlocking(False)

    # Linking
    xDepthIn.out.link(depthNn.input)
    depthNn.out.link(xOutDepthNn.input)

    # Convert detection model from OMZ to blob
    if DET_MODEL_NAME is not None:
        facedet_blob_path = blobconverter.from_zoo(name=DET_MODEL_NAME,
                                                   shaves=6,
                                                   zoo_type=DET_ZOO_TYPE)

    # Create Face Detection NN node
    faceDetNn = pipeline.createMobileNetDetectionNetwork()
    faceDetNn.setConfidenceThreshold(0.75)
    faceDetNn.setBlobPath(facedet_blob_path)

    # Create ImageManip to convert grayscale mono camera frame to RGB
    copyManip = pipeline.createImageManip()
    depth.rectifiedRight.link(copyManip.inputImage)
    # copyManip.initialConfig.setHorizontalFlip(True)
    copyManip.initialConfig.setFrameType(dai.RawImgFrame.Type.RGB888p)

    # Create ImageManip to preprocess input frame for detection NN
    detManip = pipeline.createImageManip()
    # detManip.initialConfig.setHorizontalFlip(True)
    detManip.initialConfig.setResize(DET_INPUT_SIZE[0], DET_INPUT_SIZE[1])
    detManip.initialConfig.setKeepAspectRatio(False)

    # Linking detection ImageManip to detection NN
    copyManip.out.link(detManip.inputImage)
    detManip.out.link(faceDetNn.input)

    # Create output stream for detection output
    xOutDet = pipeline.createXLinkOut()
    xOutDet.setStreamName('det_out')
    faceDetNn.out.link(xOutDet.input)

    # Script node will take the output from the face detection NN as an input and set ImageManipConfig
    # to crop the initial frame for recognition NN
    script = pipeline.create(dai.node.Script)
    script.setProcessor(dai.ProcessorType.LEON_CSS)
    script.setScriptPath("script.py")

    # Set inputs for script node
    copyManip.out.link(script.inputs['frame'])
    faceDetNn.out.link(script.inputs['face_det_in'])

    # Convert recognition model from OMZ to blob
    if REC_MODEL_NAME is not None:
        facerec_blob_path = blobconverter.from_zoo(name=REC_MODEL_NAME,
                                                   shaves=6,
                                                   zoo_type=REC_ZOO_TYPE)

    # Create Face Recognition NN node
    faceRecNn = pipeline.createNeuralNetwork()
    faceRecNn.setBlobPath(facerec_blob_path)

    # Create ImageManip to preprocess frame for recognition NN
    recManip = pipeline.createImageManip()

    # Set recognition ImageManipConfig from script node
    script.outputs['manip_cfg'].link(recManip.inputConfig)
    script.outputs['manip_img'].link(recManip.inputImage)

    # Create output stream for recognition output
    xOutRec = pipeline.createXLinkOut()
    xOutRec.setStreamName('rec_out')
    faceRecNn.out.link(xOutRec.input)

    recManip.out.link(faceRecNn.input)

    return pipeline
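
script.py itself is not shown. A plausible minimal body, assuming the standard in-Script-node API (node.io, ImageManipConfig) and an assumed recognition input size, would crop each detected face and emit a config/frame pair for recManip:

# script.py - illustrative sketch, not the original file
while True:
    frame = node.io['frame'].get()
    for det in node.io['face_det_in'].get().detections:
        cfg = ImageManipConfig()
        cfg.setCropRect(det.xmin, det.ymin, det.xmax, det.ymax)
        cfg.setResize(128, 128)  # assumed recognition NN input size
        cfg.setKeepAspectRatio(False)
        node.io['manip_cfg'].send(cfg)
        node.io['manip_img'].send(frame)
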
Example 12
def draw(data, frame):
    if len(data) == 0:
        return
    cv2.addWeighted(frame, 1, cv2.resize(data, frame.shape[:2][::-1]), 0.2, 0,
                    frame)


# Start defining a pipeline
pm = PipelineManager()
pm.createColorCam(previewSize=nn_shape)

nm = NNetManager(inputSize=nn_shape)
pm.setNnManager(nm)
pm.addNn(nm.createNN(
    pm.pipeline, pm.nodes,
    blobconverter.from_zoo(name='road-segmentation-adas-0001', shaves=6)),
         sync=True)
fps = FPSHandler()
pv = PreviewManager(display=[Previews.color.name], fpsHandler=fps)

# Pipeline is defined, now we can connect to the device
with dai.Device(pm.pipeline) as device:
    nm.createQueues(device)
    pv.createQueues(device)

    while True:
        fps.tick('color')
        pv.prepareFrames(blocking=True)
        frame = pv.get(Previews.color.name)

        road_decoded = decode(nm.outputQueue.get())
Example 13
'''

# --------------- Arguments ---------------
parser = argparse.ArgumentParser()
parser.add_argument("-w",
                    "--width",
                    help="select model width for inference",
                    default=320,
                    type=int)

args = parser.parse_args()

# choose width and height based on model
if args.width == 320:
    NN_WIDTH, NN_HEIGHT = 320, 256
    NN_PATH = blobconverter.from_zoo(name="fast_depth_256x320",
                                     zoo_type="depthai")
elif args.width == 640:
    NN_WIDTH, NN_HEIGHT = 640, 480
    NN_PATH = blobconverter.from_zoo(name="fast_depth_480x640",
                                     zoo_type="depthai")
else:
    raise ValueError(f"Width can be only 320 or 640, not {args.width}")

# --------------- Pipeline ---------------
# Start defining a pipeline
pipeline = dai.Pipeline()
pipeline.setOpenVINOVersion(version=dai.OpenVINO.VERSION_2021_4)

# Define a neural network
detection_nn = pipeline.createNeuralNetwork()
detection_nn.setBlobPath(str(NN_PATH))
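
The snippet ends before decoding. fast_depth outputs a dense depth map at the network input resolution, so a host-side sketch (assumptions: numpy imported as np, q_nn is an output queue attached to the network above) could be:

depth = np.array(q_nn.get().getFirstLayerFp16()).reshape((NN_HEIGHT, NN_WIDTH))
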
Example 14
def create_pipeline(depth):
    # Start defining a pipeline
    pipeline = dai.Pipeline()
    pipeline.setOpenVINOVersion(version=dai.OpenVINO.Version.VERSION_2021_2)
    # Define a source - color camera
    colorCam = pipeline.createColorCamera()
    if depth:
        mobilenet = pipeline.createMobileNetSpatialDetectionNetwork()
        monoLeft = pipeline.createMonoCamera()
        monoRight = pipeline.createMonoCamera()
        stereo = pipeline.createStereoDepth()
    else:
        mobilenet = pipeline.createMobileNetDetectionNetwork()

    colorCam.setPreviewSize(300, 300)
    colorCam.setResolution(
        dai.ColorCameraProperties.SensorResolution.THE_1080_P)
    colorCam.setInterleaved(False)
    colorCam.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)

    mobilenet.setBlobPath(
        str(blobconverter.from_zoo("mobilenet-ssd", shaves=6,
                                   version="2021.2")))
    mobilenet.setConfidenceThreshold(0.5)
    mobilenet.input.setBlocking(False)

    if depth:
        monoLeft.setResolution(
            dai.MonoCameraProperties.SensorResolution.THE_400_P)
        monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
        monoRight.setResolution(
            dai.MonoCameraProperties.SensorResolution.THE_400_P)
        monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)

        # Setting node configs
        stereo.initialConfig.setConfidenceThreshold(255)
        stereo.depth.link(mobilenet.inputDepth)

        mobilenet.setBoundingBoxScaleFactor(0.5)
        mobilenet.setDepthLowerThreshold(100)
        mobilenet.setDepthUpperThreshold(5000)

        monoLeft.out.link(stereo.left)
        monoRight.out.link(stereo.right)

    xoutRgb = pipeline.createXLinkOut()
    xoutRgb.setStreamName("rgb")
    colorCam.preview.link(mobilenet.input)
    if syncNN:
        mobilenet.passthrough.link(xoutRgb.input)
    else:
        colorCam.preview.link(xoutRgb.input)

    xoutNN = pipeline.createXLinkOut()
    xoutNN.setStreamName("detections")
    mobilenet.out.link(xoutNN.input)

    if depth:
        xoutBoundingBoxDepthMapping = pipeline.createXLinkOut()
        xoutBoundingBoxDepthMapping.setStreamName("boundingBoxDepthMapping")
        mobilenet.boundingBoxMapping.link(xoutBoundingBoxDepthMapping.input)

        xoutDepth = pipeline.createXLinkOut()
        xoutDepth.setStreamName("depth")
        mobilenet.passthroughDepth.link(xoutDepth.input)

    return pipeline
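
Because the depth branch uses MobileNetSpatialDetectionNetwork, each detection on the "detections" queue also carries 3D coordinates (standard depthai fields, in millimetres). A host-side sketch:

with dai.Device(create_pipeline(depth=True)) as device:
    q_det = device.getOutputQueue("detections")
    while True:
        for det in q_det.get().detections:
            print(det.label, det.spatialCoordinates.x,
                  det.spatialCoordinates.y, det.spatialCoordinates.z)
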
Example 15
def create_pipeline():
    print("Creating pipeline...")
    pipeline = dai.Pipeline()
    pipeline.setOpenVINOVersion(version=dai.OpenVINO.Version.VERSION_2021_3)

    # ColorCamera
    print("Creating Color Camera...")
    cam = pipeline.create(dai.node.ColorCamera)
    cam.setPreviewSize(300, 300)
    cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
    cam.setVideoSize(1080, 1080)
    cam.setInterleaved(False)

    controlIn = pipeline.createXLinkIn()
    controlIn.setStreamName('control')
    controlIn.out.link(cam.inputControl)

    left = pipeline.createMonoCamera()
    left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
    left.setBoardSocket(dai.CameraBoardSocket.LEFT)

    right = pipeline.createMonoCamera()
    right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
    right.setBoardSocket(dai.CameraBoardSocket.RIGHT)

    stereo = pipeline.createStereoDepth()
    stereo.initialConfig.setConfidenceThreshold(240)
    stereo.setExtendedDisparity(True)
    left.out.link(stereo.left)
    right.out.link(stereo.right)

    cam_xout = pipeline.createXLinkOut()
    cam_xout.setStreamName("frame")
    cam.video.link(cam_xout.input)

    # NeuralNetwork
    print("Creating Face Detection Neural Network...")
    face_det_nn = pipeline.createMobileNetSpatialDetectionNetwork()
    face_det_nn.setConfidenceThreshold(0.4)
    face_det_nn.setBlobPath(blobconverter.from_zoo(
        name="face-detection-retail-0004",
        shaves=6,
        version='2021.3'
    ))

    face_det_nn.setBoundingBoxScaleFactor(0.5)
    face_det_nn.setDepthLowerThreshold(200)
    face_det_nn.setDepthUpperThreshold(3000)

    cam.preview.link(face_det_nn.input)
    stereo.depth.link(face_det_nn.inputDepth)

    pass_xout = pipeline.create(dai.node.XLinkOut)
    pass_xout.setStreamName("pass_out")
    face_det_nn.passthrough.link(pass_xout.input)

    nn_xout = pipeline.create(dai.node.XLinkOut)
    nn_xout.setStreamName("nn_out")
    face_det_nn.out.link(nn_xout.input)

    if DEBUG:
        bb_xout = pipeline.create(dai.node.XLinkOut)
        bb_xout.setStreamName('bb')
        face_det_nn.boundingBoxMapping.link(bb_xout.input)

        pass_xout = pipeline.create(dai.node.XLinkOut)
        pass_xout.setStreamName('pass')
        face_det_nn.passthroughDepth.link(pass_xout.input)
    print("Pipeline created.")
    return pipeline
Example 16
controlIn = pipeline.createXLinkIn()
controlIn.setStreamName('control')
controlIn.out.link(colorCam.inputControl)

cam_xout = pipeline.createXLinkOut()
cam_xout.setStreamName('video')
colorCam.video.link(cam_xout.input)

# ---------------------------------------
# 1st stage NN - text-detection
# ---------------------------------------

nn = pipeline.createNeuralNetwork()
nn.setBlobPath(
    blobconverter.from_zoo(name="east_text_detection_256x256",
                           zoo_type="depthai",
                           shaves=6,
                           version=version))
nn.setNumPoolFrames(1)
colorCam.preview.link(nn.input)

det_passthrough_xout = pipeline.createXLinkOut()
det_passthrough_xout.setStreamName('det_passthrough')
# Only send metadata - we are only interested in the timestamp, so we can
# sync video frames with the NN output
det_passthrough_xout.setMetadataOnly(True)
nn.passthrough.link(det_passthrough_xout.input)

nn_xout = pipeline.createXLinkOut()
nn_xout.setStreamName('detections')
nn.out.link(nn_xout.input)
Example 17
def create_pipeline():
    print("Creating pipeline...")
    pipeline = dai.Pipeline()
    pipeline.setOpenVINOVersion(version=dai.OpenVINO.Version.VERSION_2020_3)

    if args.camera:
        print("Creating Color Camera...")
        cam = pipeline.createColorCamera()
        cam.setPreviewSize(672, 384)
        cam.setResolution(
            dai.ColorCameraProperties.SensorResolution.THE_1080_P)
        cam.setInterleaved(False)
        cam.setBoardSocket(dai.CameraBoardSocket.RGB)
        cam_xout = pipeline.createXLinkOut()
        cam_xout.setStreamName("cam_out")
        cam.preview.link(cam_xout.input)

    # NeuralNetwork
    print("Creating License Plates Detection Neural Network...")
    det_nn = pipeline.createMobileNetDetectionNetwork()
    det_nn.setConfidenceThreshold(0.5)
    det_nn.setBlobPath(
        str(
            blobconverter.from_zoo(
                name="vehicle-license-plate-detection-barrier-0106",
                shaves=4,
                version=openvino_version)))
    det_nn.input.setQueueSize(1)
    det_nn.input.setBlocking(False)
    det_nn_xout = pipeline.createXLinkOut()
    det_nn_xout.setStreamName("det_nn")
    det_nn.out.link(det_nn_xout.input)
    det_pass = pipeline.createXLinkOut()
    det_pass.setStreamName("det_pass")
    det_nn.passthrough.link(det_pass.input)

    if args.camera:
        manip = pipeline.createImageManip()
        manip.initialConfig.setResize(300, 300)
        manip.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p)
        cam.preview.link(manip.inputImage)
        manip.out.link(det_nn.input)
    else:
        det_xin = pipeline.createXLinkIn()
        det_xin.setStreamName("det_in")
        det_xin.out.link(det_nn.input)

    # NeuralNetwork
    print("Creating Vehicle Detection Neural Network...")
    veh_nn = pipeline.createMobileNetDetectionNetwork()
    veh_nn.setConfidenceThreshold(0.5)
    veh_nn.setBlobPath(
        str(
            blobconverter.from_zoo(name="vehicle-detection-adas-0002",
                                   shaves=4,
                                   version=openvino_version)))
    veh_nn.input.setQueueSize(1)
    veh_nn.input.setBlocking(False)
    veh_nn_xout = pipeline.createXLinkOut()
    veh_nn_xout.setStreamName("veh_nn")
    veh_nn.out.link(veh_nn_xout.input)
    veh_pass = pipeline.createXLinkOut()
    veh_pass.setStreamName("veh_pass")
    veh_nn.passthrough.link(veh_pass.input)

    if args.camera:
        cam.preview.link(veh_nn.input)
    else:
        veh_xin = pipeline.createXLinkIn()
        veh_xin.setStreamName("veh_in")
        veh_xin.out.link(veh_nn.input)

    rec_nn = pipeline.createNeuralNetwork()
    rec_nn.setBlobPath(
        str(
            blobconverter.from_zoo(
                name="license-plate-recognition-barrier-0007",
                shaves=4,
                version=openvino_version)))
    rec_nn.input.setBlocking(False)
    rec_nn.input.setQueueSize(1)
    rec_xout = pipeline.createXLinkOut()
    rec_xout.setStreamName("rec_nn")
    rec_nn.out.link(rec_xout.input)
    rec_pass = pipeline.createXLinkOut()
    rec_pass.setStreamName("rec_pass")
    rec_nn.passthrough.link(rec_pass.input)
    rec_xin = pipeline.createXLinkIn()
    rec_xin.setStreamName("rec_in")
    rec_xin.out.link(rec_nn.input)

    attr_nn = pipeline.createNeuralNetwork()
    attr_nn.setBlobPath(
        str(
            blobconverter.from_zoo(
                name="vehicle-attributes-recognition-barrier-0039",
                shaves=4,
                version=openvino_version)))
    attr_nn.input.setBlocking(False)
    attr_nn.input.setQueueSize(1)
    attr_xout = pipeline.createXLinkOut()
    attr_xout.setStreamName("attr_nn")
    attr_nn.out.link(attr_xout.input)
    attr_pass = pipeline.createXLinkOut()
    attr_pass.setStreamName("attr_pass")
    attr_nn.passthrough.link(attr_pass.input)
    attr_xin = pipeline.createXLinkIn()
    attr_xin.setStreamName("attr_in")
    attr_xin.out.link(attr_nn.input)

    print("Pipeline created.")
    return pipeline
Example 18
import numpy as np

# Get argument first
# Start defining a pipeline
pipeline = dai.Pipeline()

# Define a source - color camera
cam_rgb = pipeline.createColorCamera()
cam_rgb.setPreviewSize(300, 300)
cam_rgb.setInterleaved(False)

# Define a neural network that will make predictions based on the source frames
detection_nn = pipeline.createMobileNetDetectionNetwork()
detection_nn.setConfidenceThreshold(0.5)
detection_nn.setBlobPath(
    blobconverter.from_zoo(name="mobilenet-ssd", shaves=13))
cam_rgb.preview.link(detection_nn.input)

# Create outputs
xout_rgb = pipeline.createXLinkOut()
xout_rgb.setStreamName("rgb")
cam_rgb.preview.link(xout_rgb.input)

xout_nn = pipeline.createXLinkOut()
xout_nn.setStreamName("nn")
detection_nn.out.link(xout_nn.input)

# MobilenetSSD label texts
texts = [
    "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
    "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike",
Example 19
import subprocess
import sys
import shutil
import blobconverter

use_cache = False

if not use_cache:
    shutil.rmtree(blobconverter.__defaults["output_dir"])

result = blobconverter.from_zoo(name="face-detection-retail-0004",
                                shaves=3,
                                use_cache=use_cache)
print(result)

result = blobconverter.from_caffe(
    proto="../../mobilenet-ssd.prototxt",  # get from https://raw.githubusercontent.com/chuanqi305/MobileNet-SSD/ba00fc987b3eb0ba87bb99e89bf0298a2fd10765/MobileNetSSD_deploy.prototxt
    model="../../mobilenet-ssd.caffemodel",  # get from https://drive.google.com/file/d/0B3gersZ2cHIxRm5PMWRoTkdHdHc
    data_type="FP16",
    shaves=5,
    use_cache=use_cache,
)
print(result)

result = blobconverter.from_openvino(
    xml="../../face-detection-retail-0004.xml",  # get from https://storage.openvinotoolkit.org/repositories/open_model_zoo/2021.2/models_bin/3/face-detection-retail-0004/FP16/face-detection-retail-0004.xml
    bin="../../face-detection-retail-0004.bin",  # get from https://storage.openvinotoolkit.org/repositories/open_model_zoo/2021.2/models_bin/3/face-detection-retail-0004/FP16/face-detection-retail-0004.bin
Example 20
def create_pipeline():
    print("Creating pipeline...")
    pipeline = dai.Pipeline()
    pipeline.setOpenVINOVersion(version=dai.OpenVINO.Version.VERSION_2021_2)
    openvino_version = '2021.2'

    print("Creating Color Camera...")
    cam = pipeline.create(dai.node.ColorCamera)
    # For ImageManip rotate, the input frame size needs to be a multiple of 16
    cam.setPreviewSize(1072, 1072)
    cam.setVideoSize(VIDEO_SIZE)
    cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
    cam.setInterleaved(False)
    cam.setBoardSocket(dai.CameraBoardSocket.RGB)
    cam.setFps(5)

    host_face_out = pipeline.create(dai.node.XLinkOut)
    host_face_out.setStreamName('frame')
    cam.video.link(host_face_out.input)

    # ImageManip that will crop the frame before sending it to the Face detection NN node
    face_det_manip = pipeline.create(dai.node.ImageManip)
    face_det_manip.initialConfig.setResize(300, 300)
    face_det_manip.initialConfig.setFrameType(dai.RawImgFrame.Type.RGB888p)

    # NeuralNetwork
    print("Creating Face Detection Neural Network...")
    face_det_nn = pipeline.create(dai.node.MobileNetDetectionNetwork)
    face_det_nn.setConfidenceThreshold(0.5)
    face_det_nn.setBlobPath(
        blobconverter.from_zoo(name="face-detection-retail-0004",
                               shaves=6,
                               version=openvino_version))
    # Link Face ImageManip -> Face detection NN node
    face_det_manip.out.link(face_det_nn.input)

    # Script node will take the output from the face detection NN as an input and set ImageManipConfig
    # to the 'age_gender_manip' to crop the initial frame
    script = pipeline.create(dai.node.Script)

    face_det_nn.out.link(script.inputs['face_det_in'])

    face_detections_out = pipeline.create(dai.node.XLinkOut)
    face_detections_out.setStreamName('detections')
    face_det_nn.out.link(face_detections_out.input)

    # We are only interested in the timestamp, used to sync frames with the NN output
    face_det_nn.passthrough.link(script.inputs['face_pass'])

    with open("script.py", "r") as f:
        script.setScript(f.read())

    # ImageManip as a workaround to have more frames in the pool.
    # cam.preview can only have 4 frames in the pool before it will
    # wait (freeze). Copying frames and setting ImageManip pool size to
    # higher number will fix this issue.
    copy_manip = pipeline.create(dai.node.ImageManip)
    cam.preview.link(copy_manip.inputImage)
    copy_manip.setNumFramesPool(20)
    copy_manip.setMaxOutputFrameSize(1072 * 1072 * 3)

    copy_manip.out.link(face_det_manip.inputImage)
    copy_manip.out.link(script.inputs['preview'])

    print("Creating Head pose estimation NN")
    headpose_manip = pipeline.create(dai.node.ImageManip)
    headpose_manip.setWaitForConfigInput(True)  # needed to maintain sync
    headpose_manip.initialConfig.setResize(60, 60)

    script.outputs['manip_cfg'].link(headpose_manip.inputConfig)
    script.outputs['manip_img'].link(headpose_manip.inputImage)

    headpose_nn = pipeline.create(dai.node.NeuralNetwork)
    headpose_nn.setBlobPath(
        blobconverter.from_zoo(name="head-pose-estimation-adas-0001",
                               shaves=6,
                               version=openvino_version))
    headpose_manip.out.link(headpose_nn.input)

    headpose_nn.out.link(script.inputs['headpose_in'])
    headpose_nn.passthrough.link(script.inputs['headpose_pass'])

    print("Creating face recognition ImageManip/NN")

    face_rec_manip = pipeline.create(dai.node.ImageManip)
    face_rec_manip.setWaitForConfigInput(True)  # needed to maintain sync
    face_rec_manip.initialConfig.setResize(112, 112)

    script.outputs['manip2_cfg'].link(face_rec_manip.inputConfig)
    script.outputs['manip2_img'].link(face_rec_manip.inputImage)

    face_rec_cfg_out = pipeline.create(dai.node.XLinkOut)
    face_rec_cfg_out.setStreamName('face_rec_cfg_out')
    script.outputs['manip2_cfg'].link(face_rec_cfg_out.input)

    # Only send metadata for the host-side sync
    # pass2_out = pipeline.create(dai.node.XLinkOut)
    # pass2_out.setStreamName('pass2')
    # pass2_out.setMetadataOnly(True)
    # script.outputs['manip2_img'].link(pass2_out.input)

    face_rec_nn = pipeline.create(dai.node.NeuralNetwork)
    # Removed from OMZ, so we can't use blobconverter for downloading, see here:
    # https://github.com/openvinotoolkit/open_model_zoo/issues/2448#issuecomment-851435301
    face_rec_nn.setBlobPath(
        "models/face-recognition-mobilefacenet-arcface_2021.2_4shave.blob")
    face_rec_manip.out.link(face_rec_nn.input)

    if DISPLAY_FACE:
        xout_face = pipeline.createXLinkOut()
        xout_face.setStreamName('face')
        face_rec_manip.out.link(xout_face.input)

    arc_out = pipeline.create(dai.node.XLinkOut)
    arc_out.setStreamName('arc_out')
    face_rec_nn.out.link(arc_out.input)

    return pipeline
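
The 'arc_out' stream carries the raw ArcFace embedding. A host-side sketch for comparing it against a stored vector (assumptions: numpy imported as np, q_arc is the 'arc_out' output queue, known is a previously saved embedding):

emb = np.array(q_arc.get().getFirstLayerFp16())
cos = np.dot(emb, known) / (np.linalg.norm(emb) * np.linalg.norm(known))
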
Example 21
labelMap = ["background", "person"]

# Get argument first
parser = argparse.ArgumentParser()
parser.add_argument('-nn', '--nn', type=str, help=".blob path")
parser.add_argument('-vid', '--video', type=str, help="Path to video file to be used for inference (conflicts with -cam)")
parser.add_argument('-spi', '--spi', action='store_true', default=False, help="Send tracklets to the MCU via SPI")
parser.add_argument('-cam', '--camera', action="store_true", help="Use DepthAI RGB camera for inference (conflicts with -vid)")
parser.add_argument('-t', '--threshold', default=0.25, type=float,
    help="Minimum distance the person has to move (across the x/y axis) to be considered a real movement")
args = parser.parse_args()

parentDir = Path(__file__).parent

videoPath = args.video or parentDir / Path('demo/example_01.mp4')
nnPath = args.nn or blobconverter.from_zoo(name="person-detection-retail-0013", shaves=7)

# Whether we want to use video from host or rgb camera
VIDEO = not args.camera

class TextHelper:
    def __init__(self) -> None:
        self.bg_color = (0, 0, 0)
        self.color = (255, 255, 255)
        self.text_type = cv2.FONT_HERSHEY_SIMPLEX
        self.line_type = cv2.LINE_AA
    def putText(self, frame, text, coords):
        cv2.putText(frame, text, coords, self.text_type, 1, self.bg_color, 6, self.line_type)
        cv2.putText(frame, text, coords, self.text_type, 1, self.color, 2, self.line_type)

# Start defining a pipeline
Example 22
Blob taken from:
https://github.com/PINTO0309/PINTO_model_zoo/tree/main/151_object_detection_mobile_object_localizer
'''

# --------------- Arguments ---------------
parser = argparse.ArgumentParser()
parser.add_argument('-t',
                    '--threshold',
                    type=float,
                    help="Coonfidence threshold",
                    default=0.2)

args = parser.parse_args()
THRESHOLD = args.threshold
NN_PATH = blobconverter.from_zoo(name="mobile_object_localizer_192x192",
                                 zoo_type="depthai")
NN_WIDTH = 192
NN_HEIGHT = 192
PREVIEW_WIDTH = 640
PREVIEW_HEIGHT = 360

# --------------- Methods ---------------


def plot_boxes(frame, boxes, colors, scores):
    color_black = (0, 0, 0)
    for i in range(boxes.shape[0]):
        box = boxes[i]
        y1 = (frame.shape[0] * box[0]).astype(int)
        y2 = (frame.shape[0] * box[2]).astype(int)
        x1 = (frame.shape[1] * box[1]).astype(int)
Example 23
'''

# --------------- Arguments ---------------
parser = argparse.ArgumentParser()
parser.add_argument("-w",
                    "--width",
                    help="select model width for inference",
                    default=320,
                    type=float)

args = parser.parse_args()

# choose width and height based on model
if args.width == 320:
    NN_WIDTH, NN_HEIGHT = 320, 240
    NN_PATH = blobconverter.from_zoo(name="depth_estimation_mbnv2_240x320",
                                     zoo_type="depthai")
else:
    NN_WIDTH, NN_HEIGHT = 640, 480
    NN_PATH = blobconverter.from_zoo(name="depth_estimation_mbnv2_480x640",
                                     zoo_type="depthai")

# --------------- Pipeline ---------------
# Start defining a pipeline
pipeline = dai.Pipeline()
pipeline.setOpenVINOVersion(version=dai.OpenVINO.VERSION_2021_4)

# Define a neural network
detection_nn = pipeline.createNeuralNetwork()
detection_nn.setBlobPath(str(NN_PATH))
detection_nn.setNumPoolFrames(4)
detection_nn.input.setBlocking(False)
Example 24
# Send color frames to the host via XLink
cam_xout = p.create(dai.node.XLinkOut)
cam_xout.setStreamName("video")
cam.video.link(cam_xout.input)

# Crop 720x720 -> 300x300
face_det_manip = p.create(dai.node.ImageManip)
face_det_manip.initialConfig.setResize(300, 300)
cam.preview.link(face_det_manip.inputImage)

# NN that detects faces in the image
face_nn = p.create(dai.node.MobileNetDetectionNetwork)
face_nn.setConfidenceThreshold(0.3)
face_nn.setBlobPath(
    blobconverter.from_zoo("face-detection-retail-0004",
                           shaves=6,
                           version=openvinoVersion))
face_det_manip.out.link(face_nn.input)

# Send face detections to the host so it can visualize them
config_xout = p.create(dai.node.XLinkOut)
config_xout.setStreamName("face_det")
face_nn.out.link(config_xout.input)

# Script node will take the output from the NN as an input, get the first bounding box
# and send ImageManipConfig to the manip_crop
image_manip_script = p.create(dai.node.Script)
face_nn.out.link(image_manip_script.inputs['nn_in'])
cam.preview.link(image_manip_script.inputs['frame'])
image_manip_script.setScript("""
import time
Example 25
# Pipeline tells DepthAI what operations to perform when running - you define all of the resources used and flows here
pipeline = depthai.Pipeline()

# First, we want the Color camera as the output
cam_rgb = pipeline.createColorCamera()
cam_rgb.setPreviewSize(300, 300)  # 300x300 will be the preview frame size, available as 'preview' output of the node
cam_rgb.setInterleaved(False)

# Next, we want a neural network that will produce the detections
detection_nn = pipeline.createMobileNetDetectionNetwork()
# Blob is the Neural Network file, compiled for MyriadX. It contains both the definition and weights of the model
# We're using a blobconverter tool to retrieve the MobileNetSSD blob automatically from OpenVINO Model Zoo
detection_nn.setBlobPath(
    str(blobconverter.from_zoo(name='mobilenet-ssd', shaves=6)))
# Next, we filter out the detections that are below a confidence threshold. Confidence can be anywhere between <0..1>
detection_nn.setConfidenceThreshold(0.5)
# Next, we link the camera 'preview' output to the neural network detection input, so that it can produce detections
cam_rgb.preview.link(detection_nn.input)

# XLinkOut is a "way out" from the device. Any data you want to transfer to host need to be send via XLink
xout_rgb = pipeline.createXLinkOut()
# For the rgb camera output, we want the XLink stream to be named "rgb"
xout_rgb.setStreamName("rgb")
# Linking camera preview to XLink input, so that the frames will be sent to host
cam_rgb.preview.link(xout_rgb.input)

# The same XLinkOut mechanism will be used to receive nn results
xout_nn = pipeline.createXLinkOut()
xout_nn.setStreamName("nn")
Example 26
                    help='Show output')
parser.add_argument(
    '-sp',
    '--save_path',
    type=str,
    default='',
    help='Path to save the output. If None output won\'t be saved')
parser.add_argument('-s',
                    '--sync',
                    action="store_true",
                    help="Sync RGB output with NN output",
                    default=False)
args = parser.parse_args()

if args.model is None:
    args.model = str(blobconverter.from_zoo(name="mobilenet-ssd", shaves=7))

# Create pipeline
pipeline = dai.Pipeline()

# Define a neural network that will make predictions based on the source frames
nn = pipeline.createMobileNetDetectionNetwork()
nn.setConfidenceThreshold(0.5)
nn.setBlobPath(args.model)
nn.setNumInferenceThreads(2)
nn.input.setBlocking(False)

# Define a source for the neural network input
if args.video_path != '':
    # Create XLinkIn object as conduit for sending input video file frames
    # to the neural network
Example 27
camRgb = pipeline.createColorCamera()
camRgb.setPreviewSize(NN_SIZE)
camRgb.setInterleaved(False)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K)
camRgb.setIspScale(1,
                   3)  # You don't need to downscale (4k -> 720P) video frames

xoutFrames = pipeline.createXLinkOut()
xoutFrames.setStreamName("frames")
camRgb.video.link(xoutFrames.input)

# Define a neural network that will make predictions based on the source frames
nn = pipeline.createMobileNetDetectionNetwork()
nn.setConfidenceThreshold(0.5)
nn.setBlobPath(blobconverter.from_zoo(name="mobilenet-ssd", shaves=6))
camRgb.preview.link(nn.input)

passthroughOut = pipeline.createXLinkOut()
passthroughOut.setStreamName("pass")
nn.passthrough.link(passthroughOut.input)

nnOut = pipeline.createXLinkOut()
nnOut.setStreamName("nn")
nn.out.link(nnOut.input)

# Connect to device and start pipeline
with dai.Device(pipeline) as device:
    qFrames = device.getOutputQueue(name="frames")
    qPass = device.getOutputQueue(name="pass")
    qDet = device.getOutputQueue(name="nn")
Example 28
VIDEO_SOURCE = args.video_path
#NN_PATH = args.nn_model
CONFIDENCE_THRESHOLD = args.confidence_thresh
IOU_THRESHOLD = args.iou_thresh

# resize input to smaller size for faster inference
NN_WIDTH = 320
NN_HEIGHT = 320

# set initial resize so the input is not too large
IR_WIDTH = 640
IR_HEIGHT = 360

# --------------- Get Blob ------------------
NN_PATH = blobconverter.from_zoo(name="yolop_320x320", zoo_type="depthai")

# --------------- Check input ---------------
vid_path = Path(VIDEO_SOURCE)
if not vid_path.is_file():
    raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
                            VIDEO_SOURCE)

# --------------- Pipeline ---------------
# Start defining a pipeline
pipeline = dai.Pipeline()
pipeline.setOpenVINOVersion(version=dai.OpenVINO.VERSION_2021_4)

# Create Manip for image resizing and NN for count inference
manip = pipeline.createImageManip()
detection_nn = pipeline.createNeuralNetwork()
Example 29
if not args.camera and not args.video:
    raise RuntimeError(
        "No source selected. Please use either \"-cam\" to use RGB camera as a source or \"-vid <path>\" to run on video"
    )

debug = not args.no_debug
camera = not args.video
labels = class_names()

# Start defining a pipeline
pipeline = dai.Pipeline()

# NeuralNetwork
print("Creating Neural Network...")
detection_nn = pipeline.createNeuralNetwork()
detection_nn.setBlobPath(str(blobconverter.from_zoo(name="efficientnet-b0")))

if camera:
    print("Creating Color Camera...")
    cam_rgb = pipeline.createColorCamera()
    cam_rgb.setPreviewSize(224, 224)
    cam_rgb.setInterleaved(False)
    cam_rgb.setResolution(
        dai.ColorCameraProperties.SensorResolution.THE_1080_P)
    cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB)
    cam_rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)

    cam_xout = pipeline.createXLinkOut()
    cam_xout.setStreamName("rgb")
    cam_rgb.preview.link(cam_xout.input)
    cam_rgb.preview.link(detection_nn.input)
Example 30
import numpy as np

# Get argument first
# Start defining a pipeline
pipeline = dai.Pipeline()


# Define a source - color camera
cam_rgb = pipeline.createColorCamera()
cam_rgb.setPreviewSize(300, 300)
cam_rgb.setInterleaved(False)

# Define a neural network that will make predictions based on the source frames
detection_nn = pipeline.createMobileNetDetectionNetwork()
detection_nn.setConfidenceThreshold(0.5)
detection_nn.setBlobPath(str(blobconverter.from_zoo(name="mobilenet-ssd", shaves=13)))
cam_rgb.preview.link(detection_nn.input)

# Create outputs
xout_rgb = pipeline.createXLinkOut()
xout_rgb.setStreamName("rgb")
cam_rgb.preview.link(xout_rgb.input)

xout_nn = pipeline.createXLinkOut()
xout_nn.setStreamName("nn")
detection_nn.out.link(xout_nn.input)

# MobilenetSSD label texts
texts = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
         "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]