def _get_vipc_server(length):
  """Build and start a VisionIPC server whose RGB back stream matches `length`.

  `length` is the byte size of one RGB frame (3 bytes per pixel); it is
  matched against the known TICI and EON road-camera resolutions.
  Raises KeyError if `length` corresponds to neither resolution.
  """
  dims = None
  for cand_w, cand_h in (tici_f_frame_size, eon_f_frame_size):
    if 3 * cand_w * cand_h == length:
      dims = (cand_w, cand_h)
  if dims is None:
    raise KeyError(length)

  vipc_server = VisionIpcServer("camerad")
  vipc_server.create_buffers(VisionStreamType.VISION_STREAM_RGB_BACK, 4, True, dims[0], dims[1])
  vipc_server.start_listener()
  return vipc_server
def test_rotation(self):
  """Run loggerd+encoderd over several randomized segment lengths and verify
  that every produced segment directory contains exactly the expected files."""
  os.environ["LOGGERD_TEST"] = "1"
  Params().put("RecordFront", "1")  # enable driver-camera recording so dcamera.hevc is produced

  expected_files = {"rlog", "qlog", "qcamera.ts", "fcamera.hevc", "dcamera.hevc"}
  # (stream type, frame size, camera-state service) per camera
  streams = [(VisionStreamType.VISION_STREAM_ROAD, tici_f_frame_size if TICI else eon_f_frame_size, "roadCameraState"),
             (VisionStreamType.VISION_STREAM_DRIVER, tici_d_frame_size if TICI else eon_d_frame_size, "driverCameraState")]
  if TICI:
    # TICI hardware has an extra wide road camera
    expected_files.add("ecamera.hevc")
    streams.append((VisionStreamType.VISION_STREAM_WIDE_ROAD, tici_e_frame_size, "wideRoadCameraState"))

  pm = messaging.PubMaster(["roadCameraState", "driverCameraState", "wideRoadCameraState"])
  vipc_server = VisionIpcServer("camerad")
  for stream_type, frame_size, _ in streams:
    vipc_server.create_buffers(stream_type, 40, False, *(frame_size))
  vipc_server.start_listener()

  # repeat with fresh random segment counts/lengths to exercise rotation
  for _ in range(5):
    num_segs = random.randint(2, 5)
    length = random.randint(1, 3)  # segment length in seconds
    os.environ["LOGGERD_SEGMENT_LENGTH"] = str(length)
    managed_processes["loggerd"].start()
    managed_processes["encoderd"].start()

    fps = 20.0
    for n in range(1, int(num_segs * length * fps) + 1):
      for stream_type, frame_size, state in streams:
        # frame contents don't matter; only the metadata is checked.
        # YUV420 buffer: 1.5 bytes per pixel
        dat = np.empty(int(frame_size[0] * frame_size[1] * 3 / 2), dtype=np.uint8)
        vipc_server.send(stream_type, dat[:].flatten().tobytes(), n, n / fps, n / fps)

        camera_state = messaging.new_message(state)
        frame = getattr(camera_state, state)
        frame.frameId = n
        pm.send(state, camera_state)
      time.sleep(1.0 / fps)  # pace the fake camera at ~20 fps

    managed_processes["loggerd"].stop()
    managed_processes["encoderd"].stop()

    # route path = segment dir with the trailing "--<seg num>" stripped
    route_path = str(self._get_latest_log_dir()).rsplit("--", 1)[0]
    for n in range(num_segs):
      p = Path(f"{route_path}--{n}")
      logged = {f.name for f in p.iterdir() if f.is_file()}
      # symmetric difference: flags both missing and unexpected files
      diff = logged ^ expected_files
      self.assertEqual(len(diff), 0, f"didn't get all expected files. run={_} seg={n} {route_path=}, {diff=}\n{logged=} {expected_files=}")
def replay_cameras(lr, frs):
  """Set up a VisionIPC server plus one publisher process per camera to replay frames.

  Args:
    lr: iterable of log messages; only used to read initData.deviceType.
    frs: dict mapping camera-state name -> frame reader (may be missing entries).

  Returns:
    (vipc_server, processes) — processes are created but NOT started here.
  """
  # (service, publish period, frame size, stream type, also feed wide stream)
  eon_cameras = [
    ("roadCameraState", DT_MDL, eon_f_frame_size, VisionStreamType.VISION_STREAM_ROAD, True),
    ("driverCameraState", DT_DMON, eon_d_frame_size, VisionStreamType.VISION_STREAM_DRIVER, False),
  ]
  tici_cameras = [
    ("roadCameraState", DT_MDL, tici_f_frame_size, VisionStreamType.VISION_STREAM_ROAD, True),
    ("driverCameraState", DT_MDL, tici_d_frame_size, VisionStreamType.VISION_STREAM_DRIVER, False),
  ]

  def replay_camera(s, stream, dt, vipc_server, frames, size, use_extra_client):
    # Runs forever in a child process: publish camera state + frame at 1/dt Hz.
    pm = messaging.PubMaster([s, ])
    rk = Ratekeeper(1 / dt, print_delay_threshold=None)
    # fallback: all-zero YUV420 frame (1.5 bytes per pixel) if no decoded frames
    img = b"\x00" * int(size[0] * size[1] * 3 / 2)
    while True:
      if frames is not None:
        img = frames[rk.frame % len(frames)]  # loop over the decoded frames
      rk.keep_time()

      m = messaging.new_message(s)
      msg = getattr(m, s)
      msg.frameId = rk.frame
      msg.timestampSof = m.logMonoTime
      msg.timestampEof = m.logMonoTime
      pm.send(s, m)

      vipc_server.send(stream, img, msg.frameId, msg.timestampSof, msg.timestampEof)
      if use_extra_client:
        # mirror the road frame onto the wide stream for consumers that expect it
        vipc_server.send(VisionStreamType.VISION_STREAM_WIDE_ROAD, img, msg.frameId, msg.timestampSof, msg.timestampEof)

  # pick the camera config matching the device the route was recorded on
  init_data = [m for m in lr if m.which() == 'initData'][0]
  cameras = tici_cameras if (init_data.initData.deviceType == 'tici') else eon_cameras

  # init vipc server and cameras
  p = []
  vs = VisionIpcServer("camerad")
  for (s, dt, size, stream, use_extra_client) in cameras:
    fr = frs.get(s, None)
    frames = None
    if fr is not None:
      # decode all frames up-front so the child process doesn't need the reader
      print(f"Decompressing frames {s}")
      frames = []
      for i in tqdm(range(fr.frame_count)):
        img = fr.get(i, pix_fmt='yuv420p')[0]
        frames.append(img.flatten().tobytes())

    vs.create_buffers(stream, 40, False, size[0], size[1])
    if use_extra_client:
      vs.create_buffers(VisionStreamType.VISION_STREAM_WIDE_ROAD, 40, False, size[0], size[1])
    p.append(multiprocessing.Process(target=replay_camera, args=(s, stream, dt, vs, frames, size, use_extra_client)))

  vs.start_listener()

  return vs, p
class TestModeld(unittest.TestCase):
  """Feeds synthetic frames to a live modeld process and checks its outputs."""

  def setUp(self):
    # serve all three camera streams modeld subscribes to.
    # NOTE(review): the driver stream is sized with tici_f_frame_size (road size),
    # not tici_d_frame_size — looks intentional for this test, but confirm.
    self.vipc_server = VisionIpcServer("camerad")
    self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_ROAD, 40, False, *tici_f_frame_size)
    self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_DRIVER, 40, False, *tici_f_frame_size)
    self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_WIDE_ROAD, 40, False, *tici_f_frame_size)
    self.vipc_server.start_listener()

    self.sm = messaging.SubMaster(['modelV2', 'cameraOdometry'])
    self.pm = messaging.PubMaster(['roadCameraState', 'wideRoadCameraState', 'driverCameraState', 'liveCalibration', 'lateralPlan'])

    managed_processes['modeld'].start()
    time.sleep(0.2)  # give modeld a moment to come up before the first update
    self.sm.update(1000)

  def tearDown(self):
    managed_processes['modeld'].stop()
    del self.vipc_server

  def test_modeld(self):
    for n in range(1, 500):
      # send a matched road/wide pair with identical frameId and timestamps
      for cam in ('roadCameraState', 'wideRoadCameraState'):
        msg = messaging.new_message(cam)
        cs = getattr(msg, cam)
        cs.frameId = n
        cs.timestampSof = int((n * DT_MDL) * 1e9)
        cs.timestampEof = int(cs.timestampSof + (DT_MDL * 1e9))
        self.pm.send(msg.which(), msg)
        self.vipc_server.send(VIPC_STREAM[msg.which()], IMG_BYTES, cs.frameId, cs.timestampSof, cs.timestampEof)
      self.sm.update(5000)
      # modelV2 and cameraOdometry can arrive in separate updates; catch up once
      if self.sm['modelV2'].frameId != self.sm['cameraOdometry'].frameId:
        self.sm.update(1000)

      mdl = self.sm['modelV2']
      self.assertEqual(mdl.frameId, n)
      self.assertEqual(mdl.frameIdExtra, n)
      self.assertEqual(mdl.timestampEof, cs.timestampEof)
      self.assertEqual(mdl.frameAge, 0)
      self.assertEqual(mdl.frameDropPerc, 0)

      odo = self.sm['cameraOdometry']
      self.assertEqual(odo.frameId, n)
      self.assertEqual(odo.timestampEof, cs.timestampEof)

  def test_skipped_frames(self):
    # TODO: not implemented yet
    pass
class Camerad:
  """Fakes camerad: converts incoming RGBA frames to YUV on the GPU (pyopencl)
  and serves them over VisionIPC while publishing camera-state messages."""

  def __init__(self):
    self.frame_road_id = 0
    self.frame_wide_id = 0
    self.vipc_server = VisionIpcServer("camerad")

    self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_ROAD, 5, False, W, H)
    self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_WIDE_ROAD, 5, False, W, H)
    self.vipc_server.start_listener()

    # set up for pyopencl rgb to yuv conversion
    self.ctx = cl.create_some_context()
    self.queue = cl.CommandQueue(self.ctx)
    cl_arg = f" -DHEIGHT={H} -DWIDTH={W} -DRGB_STRIDE={W * 3} -DUV_WIDTH={W // 2} -DUV_HEIGHT={H // 2} -DRGB_SIZE={W * H} -DCL_DEBUG "

    # TODO: move rgb_to_yuv.cl to local dir once the frame stream camera is removed
    kernel_fn = os.path.join(BASEDIR, "selfdrive", "camerad", "transforms", "rgb_to_yuv.cl")
    with open(kernel_fn) as f:
      prg = cl.Program(self.ctx, f.read()).build(cl_arg)
      self.krnl = prg.rgb_to_yuv
    # kernel work-group dimensions: W and H rounded up to a multiple of 4, then /4
    self.Wdiv4 = W // 4 if (W % 4 == 0) else (W + (4 - W % 4)) // 4
    self.Hdiv4 = H // 4 if (H % 4 == 0) else (H + (4 - H % 4)) // 4

  def cam_callback_road(self, image):
    # callback for the narrow road camera
    self._cam_callback(image, self.frame_road_id, 'roadCameraState', VisionStreamType.VISION_STREAM_ROAD)
    self.frame_road_id += 1

  def cam_callback_wide_road(self, image):
    # callback for the wide road camera
    self._cam_callback(image, self.frame_wide_id, 'wideRoadCameraState', VisionStreamType.VISION_STREAM_WIDE_ROAD)
    self.frame_wide_id += 1

  def _cam_callback(self, image, frame_id, pub_type, yuv_type):
    """Convert one RGBA frame to YUV, publish it over VisionIPC and messaging.

    NOTE(review): assumes image.raw_data is H*W*4 bytes (RGBA) — confirm
    against the producer of `image`.
    """
    img = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
    img = np.reshape(img, (H, W, 4))
    img = img[:, :, [0, 1, 2]].copy()  # drop the alpha channel

    # convert RGB frame to YUV
    rgb = np.reshape(img, (H, W * 3))
    rgb_cl = cl_array.to_device(self.queue, rgb)
    yuv_cl = cl_array.empty_like(rgb_cl)
    self.krnl(self.queue, (np.int32(self.Wdiv4), np.int32(self.Hdiv4)), None, rgb_cl.data, yuv_cl.data).wait()
    # YUV420 output is half the size of the RGB input
    yuv = np.resize(yuv_cl.get(), rgb.size // 2)
    eof = int(frame_id * 0.05 * 1e9)  # synthesize a 20 Hz timestamp

    self.vipc_server.send(yuv_type, yuv.data.tobytes(), frame_id, eof, eof)

    dat = messaging.new_message(pub_type)
    msg = {
      "frameId": image.frame,
      "transform": [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]  # identity transform
    }
    setattr(dat, pub_type, msg)
    # NOTE(review): `pm` is not defined on this class — presumably a
    # module-level PubMaster; verify it exists where this class is used.
    pm.send(pub_type, dat)
def model_replay(lr_list, frs):
  """Replay logged camera frames through modeld/dmonitoringmodeld and collect outputs.

  Args:
    lr_list: iterable of log messages (camera states, calibration, lateral plans).
    frs: dict mapping camera-state name -> frame reader for the matching video.

  Returns:
    list of messages received back from the model processes.
  """
  spinner = Spinner()
  spinner.update("starting model replay")

  # size the YUV buffers for the device this is running on
  vipc_server = VisionIpcServer("camerad")
  vipc_server.create_buffers(VisionStreamType.VISION_STREAM_YUV_BACK, 40, False, *(tici_f_frame_size if TICI else eon_f_frame_size))
  vipc_server.create_buffers(VisionStreamType.VISION_STREAM_YUV_FRONT, 40, False, *(tici_d_frame_size if TICI else eon_d_frame_size))
  vipc_server.start_listener()

  pm = messaging.PubMaster(['roadCameraState', 'driverCameraState', 'liveCalibration', 'lateralPlan'])
  sm = messaging.SubMaster(['modelV2', 'driverState'])

  try:
    managed_processes['modeld'].start()
    managed_processes['dmonitoringmodeld'].start()
    time.sleep(5)  # give the model processes time to connect to VisionIPC
    sm.update(1000)

    last_desire = None
    log_msgs = []
    frame_idxs = dict.fromkeys(['roadCameraState', 'driverCameraState'], 0)

    # prime modeld with a few calibration messages before any frames.
    # BUGFIX: this previously iterated over the undefined name `lr`
    # (NameError at runtime) — the parameter is `lr_list`.
    cal = [msg for msg in lr_list if msg.which() == "liveCalibration"]
    for msg in cal[:5]:
      pm.send(msg.which(), replace_calib(msg, None))

    for msg in tqdm(lr_list):
      if msg.which() == "liveCalibration":
        last_calib = list(msg.liveCalibration.rpyCalib)
        pm.send(msg.which(), replace_calib(msg, last_calib))
      elif msg.which() == "lateralPlan":
        last_desire = msg.lateralPlan.desire
      elif msg.which() in ["roadCameraState", "driverCameraState"]:
        # process_frame returns None when the replay should stop
        ret = process_frame(msg, pm, sm, log_msgs, vipc_server, spinner, frs, frame_idxs, last_desire)
        if ret is None:
          break
  except KeyboardInterrupt:
    pass
  finally:
    spinner.close()
    managed_processes['modeld'].stop()
    managed_processes['dmonitoringmodeld'].stop()

  return log_msgs
def replay_cameras(lr, frs):
  """Create a VisionIPC server and one publisher process per camera.

  Args:
    lr: log reader (currently unused in this version).
    frs: dict mapping camera-state name -> frame reader (may be missing entries).

  Returns:
    list of camera publisher processes (not started). The server itself is
    stored in the module-level global `vs`.
  """
  # (service, publish period, frame size, stream type)
  cameras = [
    ("roadCameraState", DT_MDL, eon_f_frame_size, VisionStreamType.VISION_STREAM_YUV_BACK),
    ("driverCameraState", DT_DMON, eon_d_frame_size, VisionStreamType.VISION_STREAM_YUV_FRONT),
  ]

  def replay_camera(s, stream, dt, vipc_server, fr, size):
    # Runs forever in a child process: publish camera state + frame at 1/dt Hz.
    pm = messaging.PubMaster([s, ])
    rk = Ratekeeper(1 / dt, print_delay_threshold=None)
    # all-zero YUV420 frame (1.5 bytes per pixel)
    img = b"\x00" * int(size[0] * size[1] * 3 / 2)
    while True:
      # NOTE(review): the `and False` makes this branch dead code — real frame
      # decoding is deliberately disabled and blank frames are always sent.
      # Confirm whether this is still intended before removing.
      if fr is not None and False:
        img = fr.get(rk.frame % fr.frame_count, pix_fmt='yuv420p')[0]
        img = img.flatten().tobytes()
        print("got img")

      rk.keep_time()

      m = messaging.new_message(s)
      msg = getattr(m, s)
      msg.frameId = rk.frame
      pm.send(s, m)

      vipc_server.send(stream, img, msg.frameId, msg.timestampSof, msg.timestampEof)

  # init vipc server and cameras
  p = []
  global vs  # exposed globally so other code (e.g. cleanup) can reach the server
  vs = VisionIpcServer("camerad")
  for (s, dt, size, stream) in cameras:
    fr = frs.get(s, None)
    vs.create_buffers(stream, 40, False, size[0], size[1])
    p.append(multiprocessing.Process(target=replay_camera, args=(s, stream, dt, vs, fr, size)))

  # hack to make UI work
  vs.create_buffers(VisionStreamType.VISION_STREAM_RGB_BACK, 4, True, eon_f_frame_size[0], eon_f_frame_size[1])
  vs.start_listener()

  return p
action='store_true', help='Use nvidia instead of ffmpeg')  # continuation of a parser.add_argument(...) call that starts above this chunk
parser.add_argument("--cams", default="0,1,2", help="Cameras to decode")
args = parser.parse_args()

# index -> (encode service, RGB stream) for the three cameras
all_cams = [
  ("roadEncodeData", VisionStreamType.VISION_STREAM_RGB_ROAD),
  ("wideRoadEncodeData", VisionStreamType.VISION_STREAM_RGB_WIDE_ROAD),
  ("driverEncodeData", VisionStreamType.VISION_STREAM_RGB_DRIVER),
]
# --cams is a comma-separated list of indices into all_cams
cams = dict([all_cams[int(x)] for x in args.cams.split(",")])

vipc_server = VisionIpcServer("camerad")
for vst in cams.values():
  vipc_server.create_buffers(vst, 4, True, W, H)
vipc_server.start_listener()

# one writer + one decoder process per selected camera, connected by a FIFO
for k, v in cams.items():
  FIFO_NAME = "/tmp/decodepipe_" + k
  if os.path.exists(FIFO_NAME):
    os.unlink(FIFO_NAME)  # remove stale pipe from a previous run
  os.mkfifo(FIFO_NAME)
  multiprocessing.Process(target=writer, args=(FIFO_NAME, sys.argv[1], k)).start()
  if args.nvidia:
    multiprocessing.Process(target=decoder_nvidia, args=(FIFO_NAME, vipc_server, v)).start()
  else:
    multiprocessing.Process(target=decoder_ffmpeg, args=(FIFO_NAME, vipc_server, v)).start()
def model_replay(lr, frs):
  """Replay road/driver camera logs through modeld and dmonitoringmodeld.

  Args:
    lr: iterable of log messages.
    frs: dict mapping camera-state name -> frame reader.

  Returns:
    list of modelV2/driverState messages received, one per frame sent.
  """
  spinner = Spinner()
  spinner.update("starting model replay")

  # size the YUV buffers for the device the route was recorded on
  vipc_server = VisionIpcServer("camerad")
  vipc_server.create_buffers(VisionStreamType.VISION_STREAM_ROAD, 40, False, *(tici_f_frame_size if TICI else eon_f_frame_size))
  vipc_server.create_buffers(VisionStreamType.VISION_STREAM_DRIVER, 40, False, *(tici_d_frame_size if TICI else eon_d_frame_size))
  vipc_server.start_listener()

  sm = messaging.SubMaster(['modelV2', 'driverState'])
  pm = messaging.PubMaster(['roadCameraState', 'driverCameraState', 'liveCalibration', 'lateralPlan'])

  try:
    managed_processes['modeld'].start()
    managed_processes['dmonitoringmodeld'].start()
    time.sleep(2)  # give the model processes time to connect
    sm.update(1000)

    log_msgs = []
    last_desire = None
    frame_idxs = defaultdict(lambda: 0)

    # init modeld with valid calibration
    cal_msgs = [msg for msg in lr if msg.which() == "liveCalibration"]
    for _ in range(5):
      pm.send(cal_msgs[0].which(), cal_msgs[0].as_builder())
      time.sleep(0.1)

    for msg in tqdm(lr):
      if SEND_EXTRA_INPUTS:
        # forward calibration updates and the most recent desire to modeld
        if msg.which() == "liveCalibration":
          last_calib = list(msg.liveCalibration.rpyCalib)
          pm.send(msg.which(), replace_calib(msg, last_calib))
        elif msg.which() == "lateralPlan":
          last_desire = msg.lateralPlan.desire
          dat = messaging.new_message('lateralPlan')
          dat.lateralPlan.desire = last_desire
          pm.send('lateralPlan', dat)

      if msg.which() in ["roadCameraState", "driverCameraState"]:
        camera_state = getattr(msg, msg.which())
        stream = VisionStreamType.VISION_STREAM_ROAD if msg.which() == "roadCameraState" else VisionStreamType.VISION_STREAM_DRIVER
        img = frs[msg.which()].get(frame_idxs[msg.which()], pix_fmt="yuv420p")[0]

        # send camera state and frame
        pm.send(msg.which(), msg.as_builder())
        vipc_server.send(stream, img.flatten().tobytes(), camera_state.frameId, camera_state.timestampSof, camera_state.timestampEof)

        # wait for a response
        with Timeout(seconds=15):
          # road frames produce modelV2, driver frames produce driverState
          packet_from_camera = {"roadCameraState": "modelV2", "driverCameraState": "driverState"}
          log_msgs.append(messaging.recv_one(sm.sock[packet_from_camera[msg.which()]]))

        frame_idxs[msg.which()] += 1
        # stop once either camera's video runs out of frames
        if frame_idxs[msg.which()] >= frs[msg.which()].frame_count:
          break

        spinner.update("replaying models: road %d/%d, driver %d/%d" % (frame_idxs['roadCameraState'],
                       frs['roadCameraState'].frame_count, frame_idxs['driverCameraState'], frs['driverCameraState'].frame_count))
  finally:
    spinner.close()
    managed_processes['modeld'].stop()
    managed_processes['dmonitoringmodeld'].stop()

  return log_msgs
def model_replay(lr, frs):
  """Replay road/wide/driver camera logs through modeld and dmonitoringmodeld.

  Road and wide-road frames are sent as matched pairs (modeld needs both on
  TICI); driver frames are interleaved for dmonitoringmodeld.

  Args:
    lr: iterable of log messages.
    frs: dict mapping camera-state name -> frame reader.

  Returns:
    list of modelV2/driverState messages received.
  """
  spinner = Spinner()
  spinner.update("starting model replay")

  vipc_server = VisionIpcServer("camerad")
  vipc_server.create_buffers(VisionStreamType.VISION_STREAM_ROAD, 40, False, *(tici_f_frame_size if TICI else eon_f_frame_size))
  vipc_server.create_buffers(VisionStreamType.VISION_STREAM_DRIVER, 40, False, *(tici_d_frame_size if TICI else eon_d_frame_size))
  vipc_server.create_buffers(VisionStreamType.VISION_STREAM_WIDE_ROAD, 40, False, *(tici_f_frame_size))
  vipc_server.start_listener()

  sm = messaging.SubMaster(['modelV2', 'driverState'])
  pm = messaging.PubMaster(['roadCameraState', 'wideRoadCameraState', 'driverCameraState', 'liveCalibration', 'lateralPlan'])

  try:
    managed_processes['modeld'].start()
    managed_processes['dmonitoringmodeld'].start()

    time.sleep(5)  # give the model processes time to connect
    sm.update(1000)

    log_msgs = []
    last_desire = None
    recv_cnt = defaultdict(lambda: 0)    # responses received per model socket
    frame_idxs = defaultdict(lambda: 0)  # frames sent per camera

    # init modeld with valid calibration
    cal_msgs = [msg for msg in lr if msg.which() == "liveCalibration"]
    for _ in range(5):
      pm.send(cal_msgs[0].which(), cal_msgs[0].as_builder())
      time.sleep(0.1)

    # bucket the log by message type so cameras can be zipped together
    msgs = defaultdict(list)
    for msg in lr:
      msgs[msg.which()].append(msg)

    for cam_msgs in zip_longest(msgs['roadCameraState'], msgs['wideRoadCameraState'], msgs['driverCameraState']):
      # need a pair of road/wide msgs
      if TICI and None in (cam_msgs[0], cam_msgs[1]):
        break

      for msg in cam_msgs:
        if msg is None:
          continue  # zip_longest pads shorter streams with None

        if SEND_EXTRA_INPUTS:
          # forward calibration updates and the most recent desire to modeld
          if msg.which() == "liveCalibration":
            last_calib = list(msg.liveCalibration.rpyCalib)
            pm.send(msg.which(), replace_calib(msg, last_calib))
          elif msg.which() == "lateralPlan":
            last_desire = msg.lateralPlan.desire
            dat = messaging.new_message('lateralPlan')
            dat.lateralPlan.desire = last_desire
            pm.send('lateralPlan', dat)

        if msg.which() in VIPC_STREAM:
          msg = msg.as_builder()
          camera_state = getattr(msg, msg.which())
          img = frs[msg.which()].get(frame_idxs[msg.which()], pix_fmt="yuv420p")[0]
          frame_idxs[msg.which()] += 1

          # send camera state and frame
          camera_state.frameId = frame_idxs[msg.which()]  # re-number frames sequentially
          pm.send(msg.which(), msg)
          vipc_server.send(VIPC_STREAM[msg.which()], img.flatten().tobytes(), camera_state.frameId,
                           camera_state.timestampSof, camera_state.timestampEof)

          recv = None
          if msg.which() in ('roadCameraState', 'wideRoadCameraState'):
            # on TICI, modelV2 only arrives once BOTH road and wide frames are in
            if not TICI or min(frame_idxs['roadCameraState'], frame_idxs['wideRoadCameraState']) > recv_cnt['modelV2']:
              recv = "modelV2"
          elif msg.which() == 'driverCameraState':
            recv = "driverState"

          # wait for a response
          with Timeout(15, f"timed out waiting for {recv}"):
            if recv:
              recv_cnt[recv] += 1
              log_msgs.append(messaging.recv_one(sm.sock[recv]))

      spinner.update("replaying models: road %d/%d, driver %d/%d" % (frame_idxs['roadCameraState'],
                     frs['roadCameraState'].frame_count, frame_idxs['driverCameraState'], frs['driverCameraState'].frame_count))

      # stop once any camera's video runs out of frames
      if any(frame_idxs[c] >= frs[c].frame_count for c in frame_idxs.keys()):
        break
  finally:
    spinner.close()
    managed_processes['modeld'].stop()
    managed_processes['dmonitoringmodeld'].stop()

  return log_msgs
class TestModeld(unittest.TestCase):
  """Feeds synthetic frames to a live modeld process and checks its outputs."""

  def setUp(self):
    # NOTE(review): driver stream is sized with tici_f_frame_size (road size),
    # not a driver-specific size — looks intentional for this test, but confirm.
    self.vipc_server = VisionIpcServer("camerad")
    self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_ROAD, 40, False, *tici_f_frame_size)
    self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_DRIVER, 40, False, *tici_f_frame_size)
    self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_WIDE_ROAD, 40, False, *tici_f_frame_size)
    self.vipc_server.start_listener()

    self.sm = messaging.SubMaster(['modelV2', 'cameraOdometry'])
    self.pm = messaging.PubMaster(['roadCameraState', 'wideRoadCameraState', 'liveCalibration', 'lateralPlan'])

    managed_processes['modeld'].start()
    time.sleep(0.2)  # give modeld a moment to come up before the first update
    self.sm.update(1000)

  def tearDown(self):
    managed_processes['modeld'].stop()
    del self.vipc_server

  def _send_frames(self, frame_id, cams=None):
    """Publish camera state + frame for each camera in `cams`.

    Returns the last camera-state builder sent (None if cams is empty).
    """
    if cams is None:
      cams = ('roadCameraState', 'wideRoadCameraState')

    cs = None
    for cam in cams:
      msg = messaging.new_message(cam)
      cs = getattr(msg, cam)
      cs.frameId = frame_id
      cs.timestampSof = int((frame_id * DT_MDL) * 1e9)
      cs.timestampEof = int(cs.timestampSof + (DT_MDL * 1e9))
      self.pm.send(msg.which(), msg)
      self.vipc_server.send(VIPC_STREAM[msg.which()], IMG_BYTES, cs.frameId, cs.timestampSof, cs.timestampEof)
    return cs

  def _wait(self):
    # modelV2 and cameraOdometry can arrive in separate updates; catch up once
    self.sm.update(5000)
    if self.sm['modelV2'].frameId != self.sm['cameraOdometry'].frameId:
      self.sm.update(1000)

  def test_modeld(self):
    for n in range(1, 500):
      cs = self._send_frames(n)
      self._wait()

      mdl = self.sm['modelV2']
      self.assertEqual(mdl.frameId, n)
      self.assertEqual(mdl.frameIdExtra, n)
      self.assertEqual(mdl.timestampEof, cs.timestampEof)
      self.assertEqual(mdl.frameAge, 0)
      self.assertEqual(mdl.frameDropPerc, 0)

      odo = self.sm['cameraOdometry']
      self.assertEqual(odo.frameId, n)
      self.assertEqual(odo.timestampEof, cs.timestampEof)

  def test_dropped_frames(self):
    """ modeld should only run on consecutive road frames """
    frame_id = -1
    road_frames = list()
    for n in range(1, 50):
      # ~10% of the time, drop the road frame (send nothing or only wide)
      if (random.random() < 0.1) and n > 3:
        cams = random.choice([(), ('wideRoadCameraState', )])
        self._send_frames(n, cams)
      else:
        self._send_frames(n)
        road_frames.append(n)
      self._wait()

      # a model run is expected only when the last two road frames were consecutive
      if len(road_frames) < 3 or road_frames[-1] - road_frames[-2] == 1:
        frame_id = road_frames[-1]

      mdl = self.sm['modelV2']
      odo = self.sm['cameraOdometry']
      self.assertEqual(mdl.frameId, frame_id)
      self.assertEqual(mdl.frameIdExtra, frame_id)
      self.assertEqual(odo.frameId, frame_id)
      if n != frame_id:
        # no new outputs should have arrived for a dropped frame
        self.assertFalse(self.sm.updated['modelV2'])
        self.assertFalse(self.sm.updated['cameraOdometry'])
def model_replay(lr, fr, desire=None, calib=None):
  """Replay road-camera frames through modeld, optionally injecting desire/calibration.

  Args:
    lr: iterable of log messages.
    fr: frame reader for the road camera video.
    desire: optional per-frame desire vectors; nonzero entries are sent as lateralPlan.
    calib: optional calibration override passed to replace_calib.

  Returns:
    list of modelV2 messages received, one per frame sent.
  """
  spinner = Spinner()
  spinner.update("starting model replay")

  # created lazily once the first frame's size is known
  vipc_server = None
  pm = messaging.PubMaster(['roadCameraState', 'liveCalibration', 'lateralPlan'])
  sm = messaging.SubMaster(['modelV2'])

  # TODO: add dmonitoringmodeld
  try:
    managed_processes['modeld'].start()
    time.sleep(5)  # give modeld time to start up
    sm.update(1000)

    # enum value -> name, for building lateralPlan.desire from indices
    desires_by_index = {v: k for k, v in log.LateralPlan.Desire.schema.enumerants.items()}

    # prime modeld with a few calibration messages before any frames
    cal = [msg for msg in lr if msg.which() == "liveCalibration"]
    for msg in cal[:5]:
      pm.send(msg.which(), replace_calib(msg, calib))

    log_msgs = []
    frame_idx = 0
    for msg in tqdm(lr):
      if msg.which() == "liveCalibration":
        pm.send(msg.which(), replace_calib(msg, calib))
      elif msg.which() == "roadCameraState":
        if desire is not None:
          # replay each desire set for this frame as a lateralPlan message
          for i in desire[frame_idx].nonzero()[0]:
            dat = messaging.new_message('lateralPlan')
            dat.lateralPlan.desire = desires_by_index[i]
            pm.send('lateralPlan', dat)

        f = msg.as_builder()
        pm.send(msg.which(), f)

        img = fr.get(frame_idx, pix_fmt="yuv420p")[0]
        if vipc_server is None:
          # infer (w, h) from the YUV420 byte length (1.5 bytes per pixel)
          w, h = {int(3 * w * h / 2): (w, h) for (w, h) in [tici_f_frame_size, eon_f_frame_size]}[len(img)]
          vipc_server = VisionIpcServer("camerad")
          vipc_server.create_buffers(VisionStreamType.VISION_STREAM_YUV_BACK, 40, False, w, h)
          vipc_server.start_listener()
          time.sleep(1)  # wait for modeld to connect

        vipc_server.send(VisionStreamType.VISION_STREAM_YUV_BACK, img.flatten().tobytes(), f.roadCameraState.frameId,
                         f.roadCameraState.timestampSof, f.roadCameraState.timestampEof)

        # block until modeld produces a result for this frame
        with Timeout(seconds=15):
          log_msgs.append(messaging.recv_one(sm.sock['modelV2']))

        spinner.update("modeld replay %d/%d" % (frame_idx, fr.frame_count))

        frame_idx += 1
        if frame_idx >= fr.frame_count:
          break
  except KeyboardInterrupt:
    pass
  finally:
    spinner.close()
    managed_processes['modeld'].stop()

  return log_msgs