def test_pipeline():
    """Build the selected pipeline, apply calibration-derived camera controls,
    and pump output queues until `n`+50 frames were displayed or 'q' is pressed.

    Relies on module-level state: `collecting` ('r' = RGB, 'd' = depth),
    `use_calibration`, `calib_mono`/`calib_color`, `focus_mode`, `n`, and the
    pipeline factory helpers.
    """
    if collecting == 'r':
        pipeline, streams = create_rgb_cam_pipeline()
        #pipeline, streams = create_mono_cam_pipeline()
    elif collecting == 'd':
        pipeline, streams = create_stereo_depth_pipeline(source_camera)

    if use_calibration:
        # Loaded for the (currently disabled) manual-exposure paths below.
        exp_time_mono = calib_mono['exp_time']
        sens_iso_mono = calib_mono['sens_iso']
        exp_time_color = calib_color['exp_time']
        sens_iso_color = calib_color['sens_iso']
        lens_pos_color = calib_color['lens_pos']

    print("Creating DepthAI device")
    with dai.Device(pipeline) as device:
        print("Starting pipeline")
        device.startPipeline()

        if collecting == 'd' and use_calibration:
            controlQueue_m = device.getInputQueue('control_m')
            ctrl = dai.CameraControl()
            ctrl.setAutoExposureEnable()
            #ctrl.setManualExposure(exp_time_mono, sens_iso_mono)
            controlQueue_m.send(ctrl)
        elif collecting == 'r' and use_calibration:
            controlQueue_r = device.getInputQueue('control_r')
            ctrl1 = dai.CameraControl()
            # BUG FIX: this called `ctrl.setAutoExposureEnable()`, but `ctrl`
            # is not defined in this branch — the control must go on `ctrl1`.
            ctrl1.setAutoExposureEnable()
            #ctrl1.setManualExposure(exp_time_color, sens_iso_color)
            if focus_mode == 0:
                ctrl1.setManualFocus(lens_pos_color)
            else:
                ctrl1.setAutoFocusMode(dai.RawCameraControl.AutoFocusMode(int(focus_mode)))
            controlQueue_r.send(ctrl1)

        # Create a receive queue for each stream.
        q_list = [device.getOutputQueue(s, 8, blocking=False) for s in streams]

        # Need to set a timestamp for input frames, for the sync stage in Stereo node
        disp_frame_count = 0
        while True:
            # Handle output streams.
            for q in q_list:
                name = q.getName()
                image = q.get()
                #print("Received frame:", name)
                # Skip some streams for now, to reduce CPU load.
                if name in ['left', 'right', 'depth']:
                    continue
                frame, disp_frame_count = convert_to_cv2_frame(name, image, disp_frame_count)
                #cv2.imshow(name, frame)
                if disp_frame_count >= (int(n) + 50):
                    break
            if disp_frame_count >= (int(n) + 50):
                break
            if cv2.waitKey(1) == ord('q'):
                break
def set_focus(self, focus):
    """Disable autofocus and drive the camera lens to a manual position.

    Args:
        focus: target lens position (device-specific integer range).
    """
    qControl = self.device.getInputQueue(name="camControl")
    camControl = dai.CameraControl()
    # BUG FIX: this referenced the bare `depthai` name, but the module is
    # imported as `dai` in this file — bare `depthai` raises NameError.
    # `dai.CameraControl.AutoFocusMode` is the spelling used elsewhere here
    # and aliases RawCameraControl.AutoFocusMode in the depthai API.
    camControl.setAutoFocusMode(dai.CameraControl.AutoFocusMode.OFF)
    camControl.setManualFocus(focus)
    qControl.send(camControl)
    print("Focus:", focus)
def main():
    """Capture loop: pull packets from the device, pair multi-stream frames
    when depth is available, and hand extracted frames to a writer process
    via `frame_q` (a `None` sentinel tells the writer to stop)."""
    # Bounded queue feeding the background frame-storage process.
    frame_q = Queue(50)
    store_p = Process(target=store_frames, args=(frame_q, ))
    store_p.start()
    try:
        # Pipeline defined, now the device is connected to
        with dai.Device() as device:
            cams = device.getConnectedCameras()
            # Depth needs both mono sockets present.
            depth_enabled = dai.CameraBoardSocket.LEFT in cams and dai.CameraBoardSocket.RIGHT in cams
            ps = None
            if depth_enabled:
                ps = PairingSystem()
            else:
                # No pairing needed — drop the sequence-synced stream list.
                PairingSystem.seq_streams = []
            device.startPipeline(create_pipeline(depth_enabled))
            # Push one-shot camera controls (focus/exposure) from CLI args.
            qControl = device.getInputQueue('control')
            ctrl = dai.CameraControl()
            if args.autofocus:
                ctrl.setAutoFocusMode(getattr(dai.CameraControl.AutoFocusMode, args.autofocus))
            if args.manualfocus:
                ctrl.setManualFocus(args.manualfocus)
            # Manual exposure only when every component was supplied.
            if all(exposure):
                ctrl.setManualExposure(*exposure)
            qControl.send(ctrl)
            start_ts = monotonic()
            while True:
                # Drain every output queue without blocking.
                for queueName in PairingSystem.seq_streams + PairingSystem.ts_streams:
                    packets = device.getOutputQueue(queueName).tryGetAll()
                    if ps is not None:
                        # Depth mode: let the pairing system sync streams.
                        ps.add_packets(packets, queueName)
                    elif queueName == "color":
                        # Color-only mode: forward frames straight to storage.
                        for packet in packets:
                            frame_q.put({"color": extract_frame[queueName](packet)})
                    # Live preview unless running in production mode.
                    if queueName == "color" and len(packets) > 0 and not args.prod:
                        cv2.imshow("preview", packets[-1].getCvFrame())
                if ps is not None:
                    # Emit any complete synchronized frame sets.
                    pairs = ps.get_pairs()
                    for pair in pairs:
                        extracted_pair = {stream_name: extract_frame[stream_name](item) for stream_name, item in pair.items()}
                        if not args.prod:
                            for stream_name, item in extracted_pair.items():
                                cv2.imshow(stream_name, item)
                        frame_q.put(extracted_pair)
                # Exit on 'q' (interactive mode) or after the configured duration.
                if not args.prod and cv2.waitKey(1) == ord('q'):
                    break
                if monotonic() - start_ts > args.time:
                    break
    finally:
        # Sentinel unblocks and terminates the storage process.
        frame_q.put(None)
        store_p.join()
# NOTE(review): this chunk starts mid-script — `ve2`, `ve3`, `pipeline`,
# `settings_grey` and `settings_color` are defined earlier, outside this view.
ve2.bitstream.link(ve2Out.input)
# XLink output carrying the third encoder's bitstream to the host.
ve3Out = pipeline.createXLinkOut()
ve3Out.setStreamName('ve3Out')
ve3.bitstream.link(ve3Out.input)

# Pipeline is defined, now we can connect to the device
with dai.Device(pipeline) as dev:
    # Start pipeline
    dev.startPipeline()
    controlQueueGrey = dev.getInputQueue('controlGrey')
    controlQueueColor = dev.getInputQueue('controlColor')
    # Apply manual exposure to the grey cameras only when both values are set.
    if settings_grey.get("exp") is not None and settings_grey.get(
            "iso") is not None:
        ctrl = dai.CameraControl()
        ctrl.setManualExposure(settings_grey["exp"], settings_grey["iso"])
        controlQueueGrey.send(ctrl)
    if settings_color.get("autofocus") is not None:
        if not settings_color["autofocus"]:
            # NOTE(review): with autofocus disabled in settings this selects
            # AUTO mode and fires a one-shot trigger — presumably a single
            # focus sweep is intended; confirm against the settings schema.
            ctrl = dai.CameraControl()
            ctrl.setAutoFocusMode(dai.CameraControl.AutoFocusMode.AUTO)
            ctrl.setAutoFocusTrigger()
            controlQueueColor.send(ctrl)
    # Output queues will be used to get the encoded data from the outputs defined above
    outQ1 = dev.getOutputQueue(name='ve1Out', maxSize=30, blocking=True)
    outQ2 = dev.getOutputQueue(name='ve2Out', maxSize=30, blocking=True)
    outQ3 = dev.getOutputQueue(name='ve3Out', maxSize=30, blocking=True)
import cv2
import depthai as dai

# Check whether the control message already carries a still-capture command.
daicam = dai.CameraControl()
still = daicam.getCaptureStill()
if still:
    pass
else:
    print(f"There is no command to capture a still: {still}", "\nFixing it!")
    # BUG FIX: `daicam_truth` was referenced while its assignment was
    # commented out, which raised NameError on this path. Request the still
    # first, then read the flag back from the control message.
    daicam.setCaptureStill(True)
    daicam_truth = daicam.getCaptureStill()
    print(f"This is the daicam_truth: {daicam_truth}!")
    print(f"This is what we started with: {still}")
    still = daicam_truth
    print(f"Now this is what we got: {still}")
if still:
    print("still capture is set.")
def asControl(roi):
    """Wrap an auto-exposure region of interest in a CameraControl message.

    `roi` is unpacked as the positional arguments of setAutoExposureRegion.
    """
    ctrl = dai.CameraControl()
    ctrl.setAutoExposureRegion(*roi)
    return ctrl
# NOTE(review): this statement is the tail of a `putText` helper whose `def`
# line falls outside this chunk — kept verbatim.
cv2.putText(frame, text, coords, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

while True:
    videoIn = videoQ.tryGet()
    if videoIn is not None:
        frame = videoIn.getCvFrame()
        putText(frame, f"[E] Effect: {curr_effect}", (10, 20))
        putText(frame, f"[S] Scene: {curr_scene}", (10, 40))
        cv2.imshow("video", frame)
    key = cv2.waitKey(1)
    if key == ord('e') or key == ord('E'):
        effect = next(effects)
        print("Switching colorCamera effect:", str(effect))
        # BUG FIX: str.lstrip strips a *character set*, not a prefix —
        # "EffectMode.MONO".lstrip("EffectMode.") yields "ONO". Take the
        # member name after the enum class qualifier instead.
        curr_effect = str(effect).split(".")[-1]
        cfg = dai.CameraControl()
        cfg.setEffectMode(effect)
        ctrlQ.send(cfg)
    # Scene currently doesn't work
    elif key == ord('s') or key == ord('S'):
        scene = next(scenes)
        print("Currently doesn't work! Switching colorCamera Scene:", str(scene))
        # Same lstrip-vs-prefix fix as for the effect name above.
        curr_scene = str(scene).split(".")[-1]
        cfg = dai.CameraControl()
        cfg.setSceneMode(scene)
        ctrlQ.send(cfg)
    elif key == ord('q'):
        break
def focus(self, value: int) -> None:
    """Remember *value* and push a manual-focus control message to the device."""
    self._focus = value
    ctrl = dai.CameraControl()
    ctrl.setManualFocus(value)
    self._device.getInputQueue(name="cam_control").send(ctrl)