Example 1
    def startLoop(self):
        cliArgs = CliArgs()
        args = vars(cliArgs.parse_args())

        configMan = DepthConfigManager(args)
        if is_rpi and args['pointcloud']:
            raise NotImplementedError(
                "Point cloud visualization is currently not supported on RPI")
        # these are largely for debug and dev.
        cmd_file, debug_mode = configMan.getCommandFile()
        usb2_mode = configMan.getUsb2Mode()

        # decode_nn and show_nn are functions that are dependent on the neural network that's being run.
        decode_nn = configMan.decode_nn
        show_nn = configMan.show_nn

        # Labels for the current neural network. They are parsed from the blob config file.
        labels = configMan.labels
        NN_json = configMan.NN_config

        # This json file is sent to DepthAI. It communicates what options you'd like to enable and what model you'd like to run.
        config = configMan.jsonConfig
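        # Illustrative notes (inferred from how `config` is used in this file, not from the DepthAI docs):
        # the stream list below may mix plain names and dicts carrying a 'name' key, and the keypress
        # handler reads per-camera settings such as config['camera'][cam]['fps'].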

        # Create a list of enabled stream names (entries may be plain strings or dicts with a 'name' key)
        stream_names = [
            stream if isinstance(stream, str) else stream['name']
            for stream in configMan.stream_list
        ]

        enable_object_tracker = 'object_tracker' in stream_names

        # grab the video output file handle, if that option was enabled
        video_file = configMan.video_file

        self.device = None
        if debug_mode:
            print('Cmd file: ', cmd_file, ' args["device_id"]: ',
                  args['device_id'])
            self.device = depthai.Device(cmd_file, args['device_id'])
        else:
            self.device = depthai.Device(args['device_id'], usb2_mode)

        print(stream_names)
        print('Available streams: ' + str(self.device.get_available_streams()))

        # create the pipeline; this is the first connection to the device
        p = self.device.create_pipeline(config=config)

        if p is None:
            print('Pipeline is not created.')
            exit(3)

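        # mapping used to scale NN bounding boxes onto the (larger) depth/disparity frames; passed to show_nn below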
        nn2depth = self.device.get_nn_to_depth_bbox_mapping()

        t_start = time()
        frame_count = {}
        frame_count_prev = {}
        nnet_prev = {}
        nnet_prev["entries_prev"] = {}
        nnet_prev["nnet_source"] = {}
        frame_count['nn'] = {}
        frame_count_prev['nn'] = {}

        NN_cams = {'rgb', 'left', 'right'}

        for cam in NN_cams:
            nnet_prev["entries_prev"][cam] = None
            nnet_prev["nnet_source"][cam] = None
            frame_count['nn'][cam] = 0
            frame_count_prev['nn'][cam] = 0

        stream_windows = []
        for s in stream_names:
            if s == 'previewout':
                for cam in NN_cams:
                    stream_windows.append(s + '-' + cam)
            else:
                stream_windows.append(s)

        for w in stream_windows:
            frame_count[w] = 0
            frame_count_prev[w] = 0

        tracklets = None

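        # arm the host-side process watchdog; it is re-armed whenever packets arrive (see the main loop below)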
        self.reset_process_wd()

        time_start = time()

        def print_packet_info_header():
            print(
                '[hostTimestamp streamName] devTstamp seq camSrc width height Bpp'
            )

        def print_packet_info(packet, stream_name):
            meta = packet.getMetadata()
            print("[{:.6f} {:15s}]".format(time() - time_start, stream_name),
                  end='')
            if meta is not None:
                source = meta.getCameraName()
                if stream_name.startswith(
                        'disparity') or stream_name.startswith('depth'):
                    source += '(rectif)'
                print(" {:.6f}".format(meta.getTimestamp()),
                      meta.getSequenceNum(),
                      source,
                      end='')
                print('',
                      meta.getFrameWidth(),
                      meta.getFrameHeight(),
                      meta.getFrameBytesPP(),
                      end='')
            print()
            return

        def keypress_handler(self, key, stream_names):
            cams = ['rgb', 'mono']
            self.cam_idx = getattr(self, 'cam_idx', 0)  # default: 'rgb'
            cam = cams[self.cam_idx]
            cam_c = depthai.CameraControl.CamId.RGB
            cam_l = depthai.CameraControl.CamId.LEFT
            cam_r = depthai.CameraControl.CamId.RIGHT
            cmd_ae_region = depthai.CameraControl.Command.AE_REGION
            cmd_exp_comp = depthai.CameraControl.Command.EXPOSURE_COMPENSATION
            cmd_set_focus = depthai.CameraControl.Command.MOVE_LENS
            cmd_set_exp = depthai.CameraControl.Command.AE_MANUAL
            keypress_handler_lut = {
                ord('f'):
                lambda: self.device.request_af_trigger(),
                ord('1'):
                lambda: self.device.request_af_mode(depthai.AutofocusMode.
                                                    AF_MODE_AUTO),
                ord('2'):
                lambda: self.device.request_af_mode(depthai.AutofocusMode.
                                                    AF_MODE_CONTINUOUS_VIDEO),
                # 5,6,7,8,9,0: short example for using ISP 3A controls for Mono cameras
                ord('5'):
                lambda: self.device.send_camera_control(
                    cam_l, cmd_ae_region, '0 0 200 200 1'),
                ord('6'):
                lambda: self.device.send_camera_control(
                    cam_l, cmd_ae_region, '1000 0 200 200 1'),
                ord('7'):
                lambda: self.device.send_camera_control(
                    cam_l, cmd_exp_comp, '-2'),
                ord('8'):
                lambda: self.device.send_camera_control(
                    cam_l, cmd_exp_comp, '+2'),
                ord('9'):
                lambda: self.device.send_camera_control(
                    cam_r, cmd_exp_comp, '-2'),
                ord('0'):
                lambda: self.device.send_camera_control(
                    cam_r, cmd_exp_comp, '+2'),
            }
            if key in keypress_handler_lut:
                keypress_handler_lut[key]()
            elif key == ord('c'):
                if 'jpegout' in stream_names:
                    self.device.request_jpeg()
                else:
                    print(
                        "'jpegout' stream not enabled. Try setting -s jpegout to enable it"
                    )
            elif key == ord(
                    's'):  # switch selected camera for manual exposure control
                self.cam_idx = (self.cam_idx + 1) % len(cams)
                print("======================= Current camera to control:",
                      cams[self.cam_idx])
            # RGB manual focus/exposure controls:
            # Control:      key[dec/inc]  min..max
            # exposure time:     i   o    1..33333 [us]
            # sensitivity iso:   k   l    100..1600
            # focus:             ,   .    0..255 [far..near]
            elif key == ord('i') or key == ord('o') or key == ord(
                    'k') or key == ord('l'):
                max_exp_us = int(1000 * 1000 / config['camera'][cam]['fps'])
                self.rgb_exp = getattr(self, 'rgb_exp', 20000)  # initial
                self.rgb_iso = getattr(self, 'rgb_iso', 800)  # initial
                rgb_iso_step = 50
                rgb_exp_step = max_exp_us // 20  # split in 20 steps
                if key == ord('i'): self.rgb_exp -= rgb_exp_step
                if key == ord('o'): self.rgb_exp += rgb_exp_step
                if key == ord('k'): self.rgb_iso -= rgb_iso_step
                if key == ord('l'): self.rgb_iso += rgb_iso_step
                if self.rgb_exp < 1: self.rgb_exp = 1
                if self.rgb_exp > max_exp_us: self.rgb_exp = max_exp_us
                if self.rgb_iso < 100: self.rgb_iso = 100
                if self.rgb_iso > 1600: self.rgb_iso = 1600
                print("===================================", cam,
                      "set exposure:", self.rgb_exp, "iso:", self.rgb_iso)
                exp_arg = str(self.rgb_exp) + ' ' + str(
                    self.rgb_iso) + ' 33333'
                if cam == 'rgb':
                    self.device.send_camera_control(cam_c, cmd_set_exp,
                                                    exp_arg)
                elif cam == 'mono':
                    self.device.send_camera_control(cam_l, cmd_set_exp,
                                                    exp_arg)
                    self.device.send_camera_control(cam_r, cmd_set_exp,
                                                    exp_arg)
            elif key == ord(',') or key == ord('.'):
                self.rgb_manual_focus = getattr(self, 'rgb_manual_focus',
                                                200)  # initial
                rgb_focus_step = 3
                if key == ord(','): self.rgb_manual_focus -= rgb_focus_step
                if key == ord('.'): self.rgb_manual_focus += rgb_focus_step
                if self.rgb_manual_focus < 0: self.rgb_manual_focus = 0
                if self.rgb_manual_focus > 255: self.rgb_manual_focus = 255
                print(
                    "========================================== RGB set focus:",
                    self.rgb_manual_focus)
                focus_arg = str(self.rgb_manual_focus)
                self.device.send_camera_control(cam_c, cmd_set_focus,
                                                focus_arg)
            return

        for stream in stream_names:
            if stream in ["disparity", "disparity_color", "depth"]:
                cv2.namedWindow(stream)
                trackbar_name = 'Disparity confidence'
                conf_thr_slider_min = 0
                conf_thr_slider_max = 255
                cv2.createTrackbar(trackbar_name, stream, conf_thr_slider_min,
                                   conf_thr_slider_max,
                                   self.on_trackbar_change)
                cv2.setTrackbarPos(trackbar_name, stream,
                                   args['disparity_confidence_threshold'])

        right_rectified = None
        pcl_converter = None

        ops = 0
        prevTime = time()
        if args['verbose']: print_packet_info_header()
        while self.runThread:
            # retrieve data from the device
            # data is stored in packets; NNet (neural network) packets have additional helpers for interpreting NN results
            self.nnet_packets, self.data_packets = p.get_available_nnet_and_data_packets(
                blocking=True)

            ### Uncomment to print ops
            # ops = ops + 1
            # if time() - prevTime > 1.0:
            #     print('OPS: ', ops)
            #     ops = 0
            #     prevTime = time()

            packets_len = len(self.nnet_packets) + len(self.data_packets)
            if packets_len != 0:
                self.reset_process_wd()
            else:
                cur_time = monotonic()
                if cur_time > wd_cutoff:
                    print("process watchdog timeout")
                    os._exit(10)

            for _, nnet_packet in enumerate(self.nnet_packets):
                if args['verbose']: print_packet_info(nnet_packet, 'NNet')

                meta = nnet_packet.getMetadata()
                camera = 'rgb'
                if meta is not None:
                    camera = meta.getCameraName()
                nnet_prev["nnet_source"][camera] = nnet_packet
                nnet_prev["entries_prev"][camera] = decode_nn(nnet_packet,
                                                              config=config,
                                                              NN_json=NN_json)
                frame_count['metaout'] += 1
                frame_count['nn'][camera] += 1

            for packet in self.data_packets:
                window_name = packet.stream_name
                if packet.stream_name not in stream_names:
                    continue  # skip streams that were automatically added
                if args['verbose']:
                    print_packet_info(packet, packet.stream_name)
                packetData = packet.getData()
                if packetData is None:
                    print('Invalid packet data!')
                    continue
                elif packet.stream_name == 'previewout':
                    meta = packet.getMetadata()
                    camera = 'rgb'
                    if meta is not None:
                        camera = meta.getCameraName()

                    window_name = 'previewout-' + camera
                    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
                    # the format of the previewout image is CHW (Channel, Height, Width), but OpenCV needs HWC, so we
                    # change shape (3, 300, 300) -> (300, 300, 3)
                    data0 = packetData[0, :, :]
                    data1 = packetData[1, :, :]
                    data2 = packetData[2, :, :]
                    frame = cv2.merge([data0, data1, data2])
                    if nnet_prev["entries_prev"][camera] is not None:
                        frame = show_nn(nnet_prev["entries_prev"][camera],
                                        frame,
                                        NN_json=NN_json,
                                        config=config)
                        if enable_object_tracker and tracklets is not None:
                            frame = show_tracklets(tracklets, frame, labels)
                    cv2.putText(frame,
                                "fps: " + str(frame_count_prev[window_name]),
                                (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                                (0, 0, 0))
                    cv2.putText(
                        frame,
                        "NN fps: " + str(frame_count_prev['nn'][camera]),
                        (2, frame.shape[0] - 4), cv2.FONT_HERSHEY_SIMPLEX, 0.4,
                        (0, 255, 0))
                    cv2.imshow(window_name, frame)
                elif packet.stream_name in [
                        'left', 'right', 'disparity', 'rectified_left',
                        'rectified_right'
                ]:
                    frame_bgr = packetData
                    if args['pointcloud'] and packet.stream_name == 'rectified_right':
                        right_rectified = packetData
                    cv2.putText(frame_bgr, packet.stream_name, (25, 25),
                                cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0))
                    cv2.putText(frame_bgr,
                                "fps: " + str(frame_count_prev[window_name]),
                                (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                                (0, 0, 0))
                    if args['draw_bb_depth']:
                        camera = args['cnn_camera']
                        if packet.stream_name == 'disparity':
                            if camera == 'left_right':
                                camera = 'right'
                        elif camera != 'rgb':
                            camera = packet.getMetadata().getCameraName()
                        if nnet_prev["entries_prev"][camera] is not None:
                            frame_bgr = show_nn(
                                nnet_prev["entries_prev"][camera],
                                frame_bgr,
                                NN_json=NN_json,
                                config=config,
                                nn2depth=nn2depth)
                    cv2.imshow(window_name, frame_bgr)
                elif packet.stream_name.startswith(
                        'depth') or packet.stream_name == 'disparity_color':
                    frame = packetData

                    if len(frame.shape) == 2:
                        if frame.dtype == np.uint8:  # grayscale
                            cv2.putText(frame, packet.stream_name, (25, 25),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                                        (0, 0, 255))
                            cv2.putText(
                                frame,
                                "fps: " + str(frame_count_prev[window_name]),
                                (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                                (0, 0, 255))
                        else:  # uint16
                            if args['pointcloud'] and "depth" in stream_names and "rectified_right" in stream_names and right_rectified is not None:
                                try:
                                    from depthai_helpers.projector_3d import PointCloudVisualizer
                                except ImportError as e:
                                    raise ImportError(
                                        f"\033[1;5;31mError occurred when importing PCL projector: {e} \033[0m "
                                    )
                                if pcl_converter is None:
                                    pcl_converter = PointCloudVisualizer(
                                        self.device.get_right_intrinsic(),
                                        1280, 720)
                                right_rectified = cv2.flip(right_rectified, 1)
                                pcl_converter.rgbd_to_projection(
                                    frame, right_rectified)
                                pcl_converter.visualize_pcd()

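                            # uint16 depth -> inverse-scaled uint8 for display: nearer objects get larger values (brighter)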
                            frame = (65535 // frame).astype(np.uint8)
                            # colorize the depth map; comment out the line below to keep it grayscale
                            frame = cv2.applyColorMap(frame, cv2.COLORMAP_HOT)
                            # frame = cv2.applyColorMap(frame, cv2.COLORMAP_JET)
                            cv2.putText(frame, packet.stream_name, (25, 25),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1.0, 255)
                            cv2.putText(
                                frame,
                                "fps: " + str(frame_count_prev[window_name]),
                                (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, 255)
                    else:  # bgr
                        cv2.putText(frame, packet.stream_name, (25, 25),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                                    (255, 255, 255))
                        cv2.putText(
                            frame,
                            "fps: " + str(frame_count_prev[window_name]),
                            (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, 255)

                    if args['draw_bb_depth']:
                        camera = args['cnn_camera']
                        if camera == 'left_right':
                            camera = 'right'
                        if nnet_prev["entries_prev"][camera] is not None:
                            frame = show_nn(nnet_prev["entries_prev"][camera],
                                            frame,
                                            NN_json=NN_json,
                                            config=config,
                                            nn2depth=nn2depth)
                    cv2.imshow(window_name, frame)

                elif packet.stream_name == 'jpegout':
                    jpg = packetData
                    mat = cv2.imdecode(jpg, cv2.IMREAD_COLOR)
                    cv2.imshow('jpegout', mat)

                elif packet.stream_name == 'video':
                    videoFrame = packetData
                    videoFrame.tofile(video_file)
                    # mjpeg = packetData
                    # mat = cv2.imdecode(mjpeg, cv2.IMREAD_COLOR)
                    # cv2.imshow('mjpeg', mat)
                elif packet.stream_name == 'color':
                    meta = packet.getMetadata()
                    w = meta.getFrameWidth()
                    h = meta.getFrameHeight()
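                    # planar YUV420 (IYUV): the Y plane (h rows) is followed by the U and V planes (h/4 rows each at width w)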
                    yuv420p = packetData.reshape((h * 3 // 2, w))
                    bgr = cv2.cvtColor(yuv420p, cv2.COLOR_YUV2BGR_IYUV)
                    scale = configMan.getColorPreviewScale()
                    bgr = cv2.resize(bgr, (int(w * scale), int(h * scale)),
                                     interpolation=cv2.INTER_AREA)
                    cv2.putText(bgr, packet.stream_name, (25, 25),
                                cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0))
                    cv2.putText(bgr,
                                "fps: " + str(frame_count_prev[window_name]),
                                (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                                (0, 0, 0))
                    cv2.imshow("color", bgr)

                elif packet.stream_name == 'meta_d2h':
                    str_ = packet.getDataAsStr()
                    dict_ = json.loads(str_)

                    print(
                        'meta_d2h Temp', ' CSS:' + '{:6.2f}'.format(
                            dict_['sensors']['temperature']['css']),
                        ' MSS:' + '{:6.2f}'.format(
                            dict_['sensors']['temperature']['mss']),
                        ' UPA:' + '{:6.2f}'.format(
                            dict_['sensors']['temperature']['upa0']),
                        ' DSS:' + '{:6.2f}'.format(
                            dict_['sensors']['temperature']['upa1']))
                elif packet.stream_name == 'object_tracker':
                    tracklets = packet.getObjectTracker()

                frame_count[window_name] += 1

            t_curr = time()
            if t_start + 1.0 < t_curr:
                t_start = t_curr
                # print("metaout fps: " + str(frame_count_prev["metaout"]))

                stream_windows = []
                for s in stream_names:
                    if s == 'previewout':
                        for cam in NN_cams:
                            stream_windows.append(s + '-' + cam)
                            frame_count_prev['nn'][cam] = frame_count['nn'][
                                cam]
                            frame_count['nn'][cam] = 0
                    else:
                        stream_windows.append(s)
                for w in stream_windows:
                    frame_count_prev[w] = frame_count[w]
                    frame_count[w] = 0

            key = cv2.waitKey(1)
            if key == ord('q'):
                break
            else:
                keypress_handler(self, key, stream_names)

        del p  # to stop the pipeline, the object must be deleted; otherwise the device will keep running. This is only required if you add code after the main loop; otherwise you can omit it.
        del self.device
        cv2.destroyAllWindows()

        # Close the video output file if it was opened
        if video_file is not None:
            video_file.close()

        print('py: DONE.')
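
A minimal usage sketch, assuming a wrapper class (here called DepthAIDemo, a hypothetical name) that defines startLoop together with the runThread flag and the helpers referenced above (reset_process_wd, on_trackbar_change); everything beyond those names is illustrative:

    import threading

    runner = DepthAIDemo()  # hypothetical wrapper holding startLoop and runThread
    t = threading.Thread(target=runner.startLoop)
    t.start()
    # ... later, request a clean shutdown of the capture loop:
    runner.runThread = False
    t.join()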
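
Example 3
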
class DepthAISubscriber(Node):
    meta = []

    def __init__(self):
        # note the topic name will change if a device is specified. the name will be <stream>+<device_id>. eg preview3.
        super().__init__('depthai_subscriber')

        # setup our params
        self.paramName = "cliArgs"
        paramDefault = ""
        paramDesc = ParameterDescriptor(
            type=ParameterType.PARAMETER_STRING,
            description=
            "arguments that match the command line script depthai-demo.py input"
        )
        self.declare_parameter(self.paramName, paramDefault, paramDesc)
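        # e.g. (illustrative; package and executable names are placeholders):
        #   ros2 run <pkg> <node> --ros-args -p cliArgs:="-s previewout left right"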

        # get param
        my_param = self.get_parameter(
            self.paramName).get_parameter_value().string_value
        new_argv = [sys.argv[0]]
        if my_param is not "":
            paramList = my_param.split(" ")
            for param in paramList:
                new_argv.append(param)

        # setup argv for parsing.
        self.old_argv = sys.argv
        sys.argv = new_argv

        # parse params
        cliArgs = CliArgs()
        self.args = vars(cliArgs.parse_args())
        print(self.args)

        self.configMan = DepthConfigManager(self.args)

        # "." is not allowed in topic names so we replaced "." with "_" on the server.
        self.targetDev = self.args['device_id'].replace(".", "_")

        # get nn2depth params, if necessary.
        if self.args['draw_bb_depth']:
            self.nn2depth = self.get_nn2depth_sync(self.targetDev)
            print(self.nn2depth)

        # subscribe to topics
        self.previewSubName = 'preview' + self.targetDev
        self.previewSub = self.create_subscription(UInt8MultiArray,
                                                   self.previewSubName,
                                                   self.preview_callback, 10)
        self.previewSub  # prevent unused variable warning

        self.leftSubName = 'left' + self.targetDev
        self.leftSub = self.create_subscription(UInt8MultiArray,
                                                self.leftSubName,
                                                self.left_callback, 10)
        self.leftSub  # prevent unused variable warning

        self.rightSubName = 'right' + self.targetDev
        self.rightSub = self.create_subscription(UInt8MultiArray,
                                                 self.rightSubName,
                                                 self.right_callback, 10)
        self.rightSub  # prevent unused variable warning

        self.disparitySubName = 'disparity' + self.targetDev
        self.disparitySub = self.create_subscription(UInt8MultiArray,
                                                     self.disparitySubName,
                                                     self.disparity_callback,
                                                     10)
        self.disparitySub  # prevent unused variable warning

        self.depthSubName = 'depth' + self.targetDev
        self.depthSub = self.create_subscription(UInt8MultiArray,
                                                 self.depthSubName,
                                                 self.depth_callback, 10)
        self.depthSub  # prevent unused variable warning

        self.d2hSubName = 'd2h' + self.targetDev
        self.d2hSub = self.create_subscription(String, self.d2hSubName,
                                               self.d2h_callback, 10)
        self.d2hSub  # prevent unused variable warning

        self.metaSubName = 'meta' + self.targetDev
        self.metaSub = self.create_subscription(String, self.metaSubName,
                                                self.meta_callback, 10)
        self.metaSub  # prevent unused variable warning

    def get_nn2depth_sync(self, device_id):
        req = GetParameters.Request()
        req.names = ['nn2depth' + device_id]
        client = self.create_client(GetParameters,
                                    '/depthai_publisher/get_parameters')
        while not client.wait_for_service(timeout_sec=1.0):
            self.get_logger().info(
                'service not available, nn2depth specified so depthai_wrapper needs to be running before starting a listener.'
            )

        self.future = client.call_async(req)
        rclpy.spin_until_future_complete(self, self.future)
        if self.future.done():
            try:
                response = self.future.result()
            except Exception as e:
                self.get_logger().info('Service call failed %r' % (e, ))
            else:
                return response.values[0].string_value

        return ""

    def preview_callback(self, msg):
        serializedFrame = bytearray(msg.data)
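        # frames arrive as msgpack-serialized numpy arrays; `m` is presumably msgpack_numpy
        # (conventionally `import msgpack_numpy as m`), whose decode hook restores the ndarray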
        frame = msgpack.unpackb(serializedFrame, object_hook=m.decode)
        if self.meta:
            nn_frame = self.configMan.show_nn(self.meta,
                                              frame,
                                              labels=self.configMan.labels,
                                              NN_json=self.configMan.NN_config,
                                              config=self.configMan.jsonConfig)
            cv2.imshow(self.previewSubName, nn_frame)
            key = cv2.waitKey(1)

    def left_callback(self, msg):
        self.left_right_disparity_callback(msg, self.leftSubName)

    def right_callback(self, msg):
        self.left_right_disparity_callback(msg, self.rightSubName)

    def disparity_callback(self, msg):
        self.left_right_disparity_callback(msg, self.disparitySubName)

    def left_right_disparity_callback(self, msg, streamName):
        serializedFrame = bytearray(msg.data)
        if len(serializedFrame) > 0:
            frame = msgpack.unpackb(serializedFrame, object_hook=m.decode)

            if self.args['draw_bb_depth']:
                camera = self.args['cnn_camera']
                if streamName.startswith('disparity'):
                    if camera == 'left_right':
                        camera = 'right'
                elif camera != 'rgb':
                    # no packet metadata is available here; derive the source camera from the subscription name
                    camera = 'left' if streamName == self.leftSubName else 'right'

                if self.meta:
                    self.configMan.show_nn(self.meta,
                                           frame,
                                           labels=self.configMan.labels,
                                           NN_json=self.configMan.NN_config,
                                           config=self.configMan.jsonConfig)

            cv2.imshow(streamName, frame)
            key = cv2.waitKey(1)

    def depth_callback(self, msg):
        serializedFrame = bytearray(msg.data)
        frame = msgpack.unpackb(serializedFrame, object_hook=m.decode)

        if len(frame.shape) == 2:
            if frame.dtype == np.uint8:  # grayscale
                cv2.putText(frame, self.depthSubName, (25, 25),
                            cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255))
            else:  # uint16
                frame = (65535 // frame).astype(np.uint8)
                # colorize the depth map; comment out the line below to keep it grayscale
                frame = cv2.applyColorMap(frame, cv2.COLORMAP_HOT)
                # frame = cv2.applyColorMap(frame, cv2.COLORMAP_JET)
                cv2.putText(frame, self.depthSubName, (25, 25),
                            cv2.FONT_HERSHEY_SIMPLEX, 1.0, 255)
        else:  # bgr
            cv2.putText(frame, self.depthSubName, (25, 25),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))

        if self.args['draw_bb_depth']:
            camera = self.args['cnn_camera']
            if camera == 'left_right':
                camera = 'right'
            if self.meta:
                self.configMan.show_nn(self.meta,
                                       frame,
                                       labels=self.configMan.labels,
                                       NN_json=self.configMan.NN_config,
                                       config=self.configMan.jsonConfig,
                                       nn2depth=self.nn2depth)
        cv2.imshow(self.depthSubName, frame)
        key = cv2.waitKey(1)

    def d2h_callback(self, msg):
        dict_ = json.loads(msg.data)
        print(
            'meta_d2h Temp',
            ' CSS:' + '{:6.2f}'.format(dict_['sensors']['temperature']['css']),
            ' MSS:' + '{:6.2f}'.format(dict_['sensors']['temperature']['mss']),
            ' UPA:' +
            '{:6.2f}'.format(dict_['sensors']['temperature']['upa0']),
            ' DSS:' +
            '{:6.2f}'.format(dict_['sensors']['temperature']['upa1']))

    def meta_callback(self, msg):
        # TODO: probably want to add something smarter to handle these cases within valid data.
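        # the publisher presumably sends str(dict); rewrite quotes and bare None so json.loads accepts it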
        msg.data = msg.data.replace("\'", "\"")
        msg.data = msg.data.replace(": None", ": \"None\"")
        self.meta = json.loads(msg.data)
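
Example 4

This variant of startLoop interleaves "Insert Code for App N Here" placeholders into the same demo loop.
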
        def startLoop(self):
                cliArgs = CliArgs()
                args = vars(cliArgs.parse_args())

                configMan = DepthConfigManager(args)
                if is_rpi and args['pointcloud']:
                        raise NotImplementedError("Point cloud visualization is currently not supported on RPI")
                # these are largely for debug and dev.
                cmd_file, debug_mode = configMan.getCommandFile()
                usb2_mode = configMan.getUsb2Mode()

                # decode_nn and show_nn are functions that are dependent on the neural network that's being run.
                decode_nn = configMan.decode_nn
                show_nn = configMan.show_nn

                # Labels for the current neural network. They are parsed from the blob config file.
                labels = configMan.labels
                NN_json = configMan.NN_config

                # This json file is sent to DepthAI. It communicates what options you'd like to enable and what model you'd like to run.
                config = configMan.jsonConfig

                # Create a list of enabled stream names (entries may be plain strings or dicts with a 'name' key)
                stream_names = [stream if isinstance(stream, str) else stream['name'] for stream in configMan.stream_list]

                enable_object_tracker = 'object_tracker' in stream_names

                # grab the video output file handle, if that option was enabled
                video_file = configMan.video_file


                self.device = None
                if debug_mode:
                        print('Cmd file: ', cmd_file, ' args["device_id"]: ', args['device_id'])
                        self.device = depthai.Device(cmd_file, args['device_id'])
                else:
                        self.device = depthai.Device(args['device_id'], usb2_mode)

                print(stream_names)
                print('Available streams: ' + str(self.device.get_available_streams()))

                # create the pipeline; this is the first connection to the device
                p = self.device.create_pipeline(config=config)

                if p is None:
                        print('Pipeline is not created.')
                        exit(3)


                nn2depth = self.device.get_nn_to_depth_bbox_mapping()

                t_start = time()
                frame_count = {}
                frame_count_prev = {}
                nnet_prev = {}
                nnet_prev["entries_prev"] = {}
                nnet_prev["nnet_source"] = {}
                frame_count['nn'] = {}
                frame_count_prev['nn'] = {}

                NN_cams = {'rgb', 'left', 'right'}

                for cam in NN_cams:
                        nnet_prev["entries_prev"][cam] = None
                        nnet_prev["nnet_source"][cam] = None
                        frame_count['nn'][cam] = 0
                        frame_count_prev['nn'][cam] = 0

                stream_windows = []
                for s in stream_names:
                        if s == 'previewout':
                                for cam in NN_cams:
                                        stream_windows.append(s + '-' + cam)
                        else:
                                stream_windows.append(s)

                for w in stream_windows:
                        frame_count[w] = 0
                        frame_count_prev[w] = 0

                tracklets = None

                self.reset_process_wd()

                time_start = time()
                def print_packet_info(packet):
                        meta = packet.getMetadata()
                        print("[{:.6f} {:15s}]".format(time()-time_start, packet.stream_name), end='')
                        if meta is not None:
                                print(" {:.6f}".format(meta.getTimestamp()), meta.getSequenceNum(), end='')
                                if not (packet.stream_name.startswith('disparity')
                                         or packet.stream_name.startswith('depth')):
                                        print('', meta.getCameraName(), end='')
                        print()
                        return

                for stream in stream_names:
                        if stream in ["disparity", "disparity_color", "depth"]:
                                cv2.namedWindow(stream)
                                trackbar_name = 'Disparity confidence'
                                conf_thr_slider_min = 0
                                conf_thr_slider_max = 255
                                cv2.createTrackbar(trackbar_name, stream, conf_thr_slider_min, conf_thr_slider_max, self.on_trackbar_change)
                                cv2.setTrackbarPos(trackbar_name, stream, args['disparity_confidence_threshold'])

                right_rectified = None
                pcl_not_set = True


                # ops = 0
                # prevTime = time()
                # task  = False

                if is_rpi and args['motor']:
                        """
                        Insert Code for App 2 Here
                        """
                if args['cnn_model'] == "app2":
                        """
                        Insert Code for App 2 Here
                        """
                try:
                        # read_pickle is presumably a project helper that loads saved face encodings
                        pickle_file_data = read_pickle("face_encodings")
                except Exception:
                        pickle_file_data = []
                self.schedule_task_()
                if is_rpi:
                        """
                        Insert Code for App 1 Here
                        """
                while self.runThread:
                        # retrieve data from the device
                        # data is stored in packets; NNet (neural network) packets have additional helpers for interpreting NN results
                        self.nnet_packets, self.data_packets = p.get_available_nnet_and_data_packets(blocking=True)
                        detected_label = []

                        ### Uncomment to print ops
                        # ops = ops + 1
                        # if time() - prevTime > 1.0:
                        #     print('OPS: ', ops)
                        #     ops = 0
                        #     prevTime = time()

                        packets_len = len(self.nnet_packets) + len(self.data_packets)
                        if packets_len != 0:
                                self.reset_process_wd()
                        else:
                                cur_time = monotonic()
                                if cur_time > wd_cutoff:
                                        print("process watchdog timeout")
                                        os._exit(10)

                        for _, nnet_packet in enumerate(self.nnet_packets):
                                if args['verbose']: print_packet_info(nnet_packet)

                                meta = nnet_packet.getMetadata()
                                camera = 'rgb'
                                if meta is not None:
                                        camera = meta.getCameraName()
                                nnet_prev["nnet_source"][camera] = nnet_packet
                                nnet_prev["entries_prev"][camera] = decode_nn(nnet_packet, config=config, NN_json=NN_json)
                                frame_count['metaout'] += 1
                                frame_count['nn'][camera] += 1

                        for packet in self.data_packets:
                                window_name = packet.stream_name
                                if packet.stream_name not in stream_names:
                                        continue # skip streams that were automatically added
                                if args['verbose']: print_packet_info(packet)
                                packetData = packet.getData()
                                if packetData is None:
                                        print('Invalid packet data!')
                                        continue
                                elif packet.stream_name == 'previewout':
                                        meta = packet.getMetadata()
                                        camera = 'rgb'
                                        if meta is not None:
                                                camera = meta.getCameraName()

                                        window_name = 'previewout-' + camera
                                        # the format of the previewout image is CHW (Channel, Height, Width), but OpenCV needs HWC, so we
                                        # change shape (3, 300, 300) -> (300, 300, 3)
                                        data0 = packetData[0,:,:]
                                        data1 = packetData[1,:,:]
                                        data2 = packetData[2,:,:]
                                        frame = cv2.merge([data0, data1, data2])

                                        if nnet_prev["entries_prev"][camera] is not None:
                                                try:
                                                        global task_run_motor, task_play_sound, task_start_led
                                                        if args['cnn_model2'] == "app5_landmark":
                                                                """
                                                                Insert Code for App 5 Here
                                                                """
                                                        elif args['cnn_model'] == "app4":
                                                                """
                                                                Insert Code for App 4 Here
                                                                """
                                                        elif args['social_distance']:
                                                                """
                                                                Insert Code for App 2 Here
                                                                """
                                                        else:
                                                                frame, detected_label = show_nn(nnet_prev["entries_prev"][camera], frame, NN_json=NN_json, config=config)
                                                                print(detected_label)
                                                        for label in detected_label:
                                                                if str(label) == "mask" and is_rpi and args['motor'] and task_run_motor:
                                                                        """
                                                                        Insert Code for App 3 Here
                                                                        """
                                                                if str(label) == "person" and args['play_sound'] and task_play_sound:
                                                                        """
                                                                        Insert Code for App 1 Here
                                                                        """
                                                                if is_rpi and args['cnn_model'] == "app1":  # label check truncated in the source listing
                                                                        """
                                                                        Insert Code for App 1 Here
                                                                        """
                                                except Exception:
                                                        frame = show_nn(nnet_prev["entries_prev"][camera], frame, NN_json=NN_json, config=config)

                                                if enable_object_tracker and tracklets is not None:
                                                        frame = show_tracklets(tracklets, frame, labels)
                                        #cv2.putText(frame, "fps: " + str(frame_count_prev[window_name]), (0, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0),2)
                                        cv2.putText(frame, "NN fps: " + str(frame_count_prev['nn'][camera]), (2, frame.shape[0]-4), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0))
                                        cv2.imshow(window_name, frame)
                                elif packet.stream_name in ['left', 'right', 'disparity', 'rectified_left', 'rectified_right']:
                                        frame_bgr = packetData
                                        if args['pointcloud'] and packet.stream_name == 'rectified_right':
                                                right_rectified = packetData
                                        cv2.putText(frame_bgr, packet.stream_name, (25, 25), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0))
                                        cv2.putText(frame_bgr, "fps: " + str(frame_count_prev[window_name]), (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0))
                                        camera = None
                                        if args['draw_bb_depth']:
                                                camera = args['cnn_camera']
                                                if packet.stream_name == 'disparity':
                                                        if camera == 'left_right':
                                                                camera = 'right'
                                                elif camera != 'rgb':
                                                        camera = packet.getMetadata().getCameraName()
                                                if nnet_prev["entries_prev"][camera] is not None:
                                                        frame_bgr, detected_label = show_nn(nnet_prev["entries_prev"][camera], frame_bgr,
                                                                                            NN_json=NN_json, config=config, nn2depth=nn2depth)
                                        cv2.imshow(window_name, frame_bgr)
                                elif packet.stream_name.startswith('depth') or packet.stream_name == 'disparity_color':
                                        frame = packetData

                                        if len(frame.shape) == 2:
                                                if frame.dtype == np.uint8: # grayscale
                                                        cv2.putText(frame, packet.stream_name, (25, 25), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255))
                                                        cv2.putText(frame, "fps: " + str(frame_count_prev[window_name]), (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255))
                                                else: # uint16
                                                        if args['pointcloud'] and "depth" in stream_names and "rectified_right" in stream_names and right_rectified is not None:
                                                                if pcl_not_set:
                                                                        pcl_converter = PointCloudVisualizer(self.device.get_right_intrinsic(), 1280, 720)
                                                                        pcl_not_set = False
                                                                right_rectified = cv2.flip(right_rectified, 1)
                                                                pcd = pcl_converter.rgbd_to_projection(frame, right_rectified)
                                                                pcl_converter.visualize_pcd()

                                                        frame = (65535 // frame).astype(np.uint8)
                                                        # colorize the depth map; comment out the line below to keep it grayscale
                                                        frame = cv2.applyColorMap(frame, cv2.COLORMAP_HOT)
                                                        # frame = cv2.applyColorMap(frame, cv2.COLORMAP_JET)
                                                        cv2.putText(frame, packet.stream_name, (25, 25), cv2.FONT_HERSHEY_SIMPLEX, 1.0, 255)
                                                        cv2.putText(frame, "fps: " + str(frame_count_prev[window_name]), (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, 255)
                                        else: # bgr
                                                cv2.putText(frame, packet.stream_name, (25, 25), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))
                                                cv2.putText(frame, "fps: " + str(frame_count_prev[window_name]), (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, 255)

                                        if args['draw_bb_depth']:
                                                camera = args['cnn_camera']
                                                if camera == 'left_right':
                                                        camera = 'right'
                                                if nnet_prev["entries_prev"][camera] is not None:
                                                        frame, detected_label = show_nn(nnet_prev["entries_prev"][camera], frame, NN_json=NN_json,
                                                                                         config=config, nn2depth=nn2depth)
                                        cv2.imshow(window_name, frame)

                                elif packet.stream_name == 'jpegout':
                                        jpg = packetData
                                        mat = cv2.imdecode(jpg, cv2.IMREAD_COLOR)
                                        cv2.imshow('jpegout', mat)

                                elif packet.stream_name == 'video':
                                        videoFrame = packetData
                                        videoFrame.tofile(video_file)
                                        # mjpeg = packetData
                                        # mat = cv2.imdecode(mjpeg, cv2.IMREAD_COLOR)
                                        # cv2.imshow('mjpeg', mat)
                                elif packet.stream_name == 'color':
                                        meta = packet.getMetadata()
                                        w = meta.getFrameWidth()
                                        h = meta.getFrameHeight()
                                        yuv420p = packetData.reshape((h * 3 // 2, w))
                                        bgr = cv2.cvtColor(yuv420p, cv2.COLOR_YUV2BGR_IYUV)
                                        scale = configMan.getColorPreviewScale()
                                        bgr = cv2.resize(bgr, (int(w * scale), int(h * scale)), interpolation=cv2.INTER_AREA)
                                        cv2.putText(bgr, packet.stream_name, (25, 25), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0))
                                        cv2.putText(bgr, "fps: " + str(frame_count_prev[window_name]), (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0))
                                        cv2.imshow("color", bgr)

                                elif packet.stream_name == 'meta_d2h':
                                        str_ = packet.getDataAsStr()
                                        dict_ = json.loads(str_)

                                        print('meta_d2h Temp',
                                                ' CSS:' + '{:6.2f}'.format(dict_['sensors']['temperature']['css']),
                                                ' MSS:' + '{:6.2f}'.format(dict_['sensors']['temperature']['mss']),
                                                ' UPA:' + '{:6.2f}'.format(dict_['sensors']['temperature']['upa0']),
                                                ' DSS:' + '{:6.2f}'.format(dict_['sensors']['temperature']['upa1']))
                                elif packet.stream_name == 'object_tracker':
                                        tracklets = packet.getObjectTracker()

                                frame_count[window_name] += 1

                        t_curr = time()
                        if t_start + 1.0 < t_curr:
                                t_start = t_curr
                                # print("metaout fps: " + str(frame_count_prev["metaout"]))

                                stream_windows = []
                                for s in stream_names:
                                        if s == 'previewout':
                                                for cam in NN_cams:
                                                        stream_windows.append(s + '-' + cam)
                                                        frame_count_prev['nn'][cam] = frame_count['nn'][cam]
                                                        frame_count['nn'][cam] = 0
                                        else:
                                                stream_windows.append(s)
                                for w in stream_windows:
                                        frame_count_prev[w] = frame_count[w]
                                        frame_count[w] = 0

                        key = cv2.waitKey(1)
                        if key == ord('c'):
                                if 'jpegout' not in stream_names:
                                        print("'jpegout' stream not enabled. Try adding '-s jpegout' to enable it")
                                else:
                                        self.device.request_jpeg()
                        elif key == ord('f'):
                                self.device.request_af_trigger()
                        elif key == ord('1'):
                                self.device.request_af_mode(depthai.AutofocusMode.AF_MODE_AUTO)
                        elif key == ord('2'):
                                self.device.request_af_mode(depthai.AutofocusMode.AF_MODE_CONTINUOUS_VIDEO)
                        elif key == ord('a'):
                                user_name = input()
                                add_face = True
                        elif key == ord('q'):
                                break
                    

                del p  # To stop the pipeline, the object must be deleted; otherwise the device keeps running. This matters only if you add code after the main loop; otherwise you can omit it.
                del self.device

                # Close the video output file if it was opened
                if video_file is not None:
                        video_file.close()

                print('py: DONE.')
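
A note on teardown: the `del p` / `del self.device` calls above only run when the
loop exits normally. A minimal sketch of a safer pattern, assuming the same
legacy depthai Device/pipeline API as this example (the helper name
run_packet_loop is hypothetical):

    import cv2

    def run_packet_loop(device, config):
        p = device.create_pipeline(config=config)
        if p is None:
            raise RuntimeError('Pipeline is not created.')
        try:
            while cv2.waitKey(1) != ord('q'):
                nnet_packets, data_packets = p.get_available_nnet_and_data_packets()
                # ... process packets as in the example above ...
        finally:
            # Deleting the pipeline is what actually stops the device.
            del p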
Example #5
0
    def __init__(self):
        super().__init__('depthai_publisher')

        # Set up a CLI parameter for passing in command-line arguments.
        self.cliParamName = "cliArgs"
        paramDefault = ""
        paramDesc = ParameterDescriptor(
            type=ParameterType.PARAMETER_STRING,
            description="arguments matching the command-line input of depthai-demo.py",
        )
        self.declare_parameter(self.cliParamName, paramDefault, paramDesc)

        # get cli param
        my_param = self.get_parameter(
            self.cliParamName).get_parameter_value().string_value
        new_argv = [sys.argv[0]]
        if my_param != "":
            paramList = my_param.split(" ")
            for param in paramList:
                new_argv.append(param)

        # Set up sys.argv for parsing.
        self.old_argv = sys.argv
        sys.argv = new_argv

        # parse params
        cliArgs = CliArgs()
        self.args = vars(cliArgs.parse_args())
        print(self.args)

        configMan = DepthConfigManager(self.args)
        self.cmd_file, self.debug_mode = configMan.getCommandFile()
        self.usb2_mode = configMan.getUsb2Mode()
        self.decode_nn = configMan.decode_nn
        self.decode_nn_json = configMan.decode_nn_json
        self.show_nn = configMan.show_nn
        self.labels = configMan.labels
        self.NN_json = configMan.NN_config

        # This json file is sent to DepthAI. It communicates what options you'd like to enable and what model you'd like to run.
        self.config = configMan.jsonConfig

        self.targetDev = self.args['device_id'].replace(".", "_")

        # Set up publishers
        self.previewPublisher = self.create_publisher(
            UInt8MultiArray, 'preview' + self.targetDev, 10)
        self.previewmsg = UInt8MultiArray()

        self.leftPublisher = self.create_publisher(UInt8MultiArray,
                                                   'left' + self.targetDev, 10)
        self.leftmsg = UInt8MultiArray()

        self.rightPublisher = self.create_publisher(UInt8MultiArray,
                                                    'right' + self.targetDev,
                                                    10)
        self.rightmsg = UInt8MultiArray()

        self.disparityPublisher = self.create_publisher(
            UInt8MultiArray, 'disparity' + self.targetDev, 10)
        self.disparitymsg = UInt8MultiArray()

        self.depthPublisher = self.create_publisher(UInt8MultiArray,
                                                    'depth' + self.targetDev,
                                                    10)
        self.depthmsg = UInt8MultiArray()

        self.d2hPublisher = self.create_publisher(String,
                                                  'd2h' + self.targetDev, 10)
        self.d2hmsg = String()

        self.nnResultPublisher = self.create_publisher(String,
                                                       'meta' + self.targetDev,
                                                       10)
        self.nnmsg = String()

        # Create a list of enabled stream names
        self.stream_names = [
            stream if isinstance(stream, str) else stream['name']
            for stream in configMan.stream_list
        ]
        self.enable_object_tracker = 'object_tracker' in self.stream_names
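
The publishers above send raw frame bytes as UInt8MultiArray messages. A minimal
sketch of how a previewout packet might be published on one of these topics (the
flattening scheme and the helper name publish_preview are assumptions for
illustration, not part of the example):

    import numpy as np
    from std_msgs.msg import UInt8MultiArray

    def publish_preview(self, packet):
        frame = packet.getData()  # planar CHW uint8 array from depthai
        msg = UInt8MultiArray()
        msg.data = frame.astype(np.uint8).flatten().tolist()
        self.previewPublisher.publish(msg)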
Example #6
0
    def start_loop(self):
        top_left = (137, 204)
        width = 300
        height = 150
        bottom_right = (top_left[0] + width, top_left[1] + height)

        mode = "both"  # "3dvis", "wls" or "both"
        persist = False
        output_path = "../../oakmower_training_data"
        persister = Persister(output_path)
        depth_classifier = DisparityClassifier(top_left, width, height)
        plane_classifier = PlaneClassifier()
        motion_evaluator = MotionEvaluator()

        if mode == "3dvis":
            streams = ["disparity", 'rectified_right']
            fit_plane = True
            filter_wls = False
        elif mode == "wls":
            streams = ["disparity", 'rectified_right']
            fit_plane = False
            filter_wls = True
        elif mode == "both":
            streams = [
                'previewout',
                'metaout',
                "disparity",
                'rectified_right',
            ]
            fit_plane = True
            filter_wls = True
        else:
            streams = ["rectified_right"]
            fit_plane = False
            filter_wls = False
            print("no mode selected")

        wls_filter = WLSFilter(persister=persister,
                               show_single_windows=self.show_single_windows)

        rectified_right = None
        pcl_projector = None
        detections = []

        cliArgs = CliArgs()
        args = vars(cliArgs.parse_args())

        configMan = DepthConfigManager(args)

        config = configMan.jsonConfig
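        # Override the generated config: restrict the stream list and lower the
        # mono resolution and frame rates for this use case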
        config["streams"] = streams
        config["camera"]["mono"]["resolution_h"] = 400
        config["camera"]["mono"]["fps"] = 10
        config["camera"]["rgb"]["fps"] = 10

        self.device = depthai.Device(args['device_id'], False)
        p = self.device.create_pipeline(config=config)

        if p is None:
            print('Pipeline is not created.')
            exit(3)

        self.reset_process_wd()

        while self.runThread:
            nnet_packets, data_packets = p.get_available_nnet_and_data_packets(
                True)

            packets_len = len(nnet_packets) + len(data_packets)
            if packets_len != 0:
                self.reset_process_wd()
            else:
                cur_time = time.monotonic()
                if cur_time > wd_cutoff:
                    print("process watchdog timeout")
                    os._exit(10)

            for nnet_packet in nnet_packets:
                detections = list(nnet_packet.getDetectedObjects())

            for packet in data_packets:

                if packet.stream_name == 'previewout':
                    data = packet.getData()
                    data0 = data[0, :, :]
                    img_h = data0.shape[0]
                    img_w = data0.shape[1]
                    data1 = data[1, :, :]
                    data2 = data[2, :, :]
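                    # Alpha-blend the lower half of the red channel toward 255 so the
                    # near-field region shows a red tint in the merged preview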
                    alpha = 0.3
                    beta = (1.0 - alpha)
                    red_im = np.ones_like(data2) * 255
                    data2[int(img_h / 2):, :] = cv2.addWeighted(
                        data2[int(img_h / 2):, :], alpha,
                        red_im[int(img_h / 2):, :], beta, 0.0)
                    frame = cv2.merge([data0, data1, data2])
                    self.status["objects_clear"] = True
                    for detection in detections:
                        pt1 = int(detection.x_min * img_w), int(
                            detection.y_min * img_h)
                        pt2 = int(detection.x_max * img_w), int(
                            detection.y_max * img_h)
                        if pt2[1] < int(img_h / 2) or (
                            (pt2[0] - pt1[0]) > 0.95 * img_w) or (
                                (pt2[1] - pt1[1]) > 0.95 * img_h):
                            cv2.rectangle(frame, pt1, pt2, (255, 255, 255), 2)
                        else:
                            cv2.rectangle(frame, pt1, pt2, (0, 0, 255), 2)
                            self.status["objects_clear"] = False
                    objects_frame = np.zeros([400, 640, 3], dtype=np.uint8)
                    objects_frame[54:354, 137:437] = frame
                    if self.show_single_windows:
                        cv2.imshow('cockpit', objects_frame)
                    self.objects_frame = self.label_imgs(
                        objects_frame, "Object detection")

                    if persister is not None:
                        persister.add_preview_bb_free(frame)

                if packet.stream_name == "rectified_right":
                    rectified_right = packet.getData()
                    rectified_right = cv2.flip(rectified_right, flipCode=1)
                    motion_evaluator.update(
                        np.copy(rectified_right[top_left[1]:bottom_right[1],
                                                top_left[0]:bottom_right[0]]))
                    self.status["moving_forward"] = motion_evaluator.get_motion()
                    rect_right_bb = wls_filter.update(rectified_right,
                                                      packet.stream_name)
                    if rect_right_bb is not None:
                        self.rectified_right_frame = self.label_imgs(
                            cv2.cvtColor(rect_right_bb, cv2.COLOR_GRAY2BGR),
                            "Right camera")

                elif packet.stream_name == "depth" or packet.stream_name == "disparity":

                    if packet.stream_name == "disparity":
                        disparity_frame = packet.getData()
                        if fit_plane:
                            frame_f64 = disparity_frame.astype(np.float64)
                            frame_f64[frame_f64 < 1] = 0.000001
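                            # Disparity -> depth via depth = baseline * focal / disparity;
                            # 75 and 883.14 presumably encode the stereo baseline and the
                            # focal length in pixels (zeros were clamped above to avoid /0)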
                            depth = 75 * 883.14 / frame_f64
                            frame16 = depth.astype(np.uint16)
                            frame16[frame16 < 1] = 65535
                            depth_frame = frame16
                        else:
                            depth_frame = np.zeros_like(disparity_frame)
                    else:
                        depth_frame = packet.getData()
                        disparity_frame = None

                    if filter_wls:
                        wls_filter.update(frame=disparity_frame,
                                          stream_name=packet.stream_name)

                    if fit_plane:
                        if rectified_right is not None:
                            if pcl_projector is None:
                                fd, path = tempfile.mkstemp(suffix='.json')
                                with os.fdopen(fd, 'w') as tmp:
                                    json.dump(
                                        {
                                            "width": 600,
                                            "height": 400,
                                            "intrinsic_matrix": [
                                                item
                                                for row in self.device.get_right_intrinsic()
                                                for item in row
                                            ],
                                        }, tmp)
                                pcl_projector = PointCloudProjector(
                                    path, persister=persister)
                            plane_parameters = pcl_projector.rgbd_to_projection(
                                depth_frame, np.ones_like(rectified_right))
                            self.status["plane"] = plane_classifier.classify(
                                plane_parameters)
                            pcl_projector.visualize_pcd()

                    # Map depth to inverse depth for display (near = bright, far = dark)
                    depth_frame8 = (65535 // depth_frame).astype(np.uint8)
                    # colorize the depth map; comment out the applyColorMap call below to obtain grayscale
                    depth_frame8 = cv2.applyColorMap(depth_frame8,
                                                     cv2.COLORMAP_HOT)
                    depth_frame8 = cv2.rectangle(depth_frame8, top_left,
                                                 bottom_right, (255, 255, 255),
                                                 2)
                    if self.show_single_windows:
                        cv2.imshow("depth", depth_frame8)
                    self.depth_frame = self.label_imgs(depth_frame8, "Depth")

                if filter_wls:
                    colored_wls = wls_filter.filter()
                    if colored_wls is not None:
                        self.wls_frame = self.label_imgs(
                            colored_wls,
                            "Disparity (filtered using right camera)")
                    if wls_filter.filtered_disp is not None:
                        self.status["disparity_clear"] = depth_classifier.classify(
                            wls_filter.filtered_disp)

                if self.show_overview:
                    comb_frame = np.vstack([
                        self.visualize_classifications(),
                        np.hstack([self.objects_frame, self.wls_frame]),
                        np.hstack(
                            [self.depth_frame, self.rectified_right_frame])
                    ])
                    cv2.imshow("overview", comb_frame)

            print(self.status)

            if cv2.waitKey(1) == ord("s"):
                print("Writing to disk")
                persister.write_to_disk()
                persister.entries.clear()
            if cv2.waitKey(1) == ord("c"):
                persister.entries.clear()
            if cv2.waitKey(1) == ord("p"):
                if persist:
                    print("Accumulating data stopped")
                    persist = False
                else:
                    print("Accumulating data")
                    persist = True
            if cv2.waitKey(1) == ord("q"):
                break

        del p  # To stop the pipeline, the object must be deleted; otherwise the device keeps running. This matters only if you add code after the main loop; otherwise you can omit it.
        del self.device
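
Both loops above rely on a process watchdog (reset_process_wd() and the
wd_cutoff deadline) whose definition is not shown. A minimal sketch consistent
with how it is used here (the 10-second interval is an assumption):

    import time

    wd_cutoff = 0.0  # deadline compared against time.monotonic() in the loop

    def reset_process_wd(self, interval=10.0):
        # Push the watchdog deadline `interval` seconds into the future; if no
        # packets arrive before it expires, the loop calls os._exit(10).
        global wd_cutoff
        wd_cutoff = time.monotonic() + interval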