Example #1
class Camerad:
    def __init__(self):
        self.frame_road_id = 0
        self.frame_wide_id = 0
        self.vipc_server = VisionIpcServer("camerad")

        self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_ROAD, 5,
                                        False, W, H)
        self.vipc_server.create_buffers(
            VisionStreamType.VISION_STREAM_WIDE_ROAD, 5, False, W, H)
        self.vipc_server.start_listener()

        # set up for pyopencl rgb to yuv conversion
        self.ctx = cl.create_some_context()
        self.queue = cl.CommandQueue(self.ctx)
        cl_arg = f" -DHEIGHT={H} -DWIDTH={W} -DRGB_STRIDE={W * 3} -DUV_WIDTH={W // 2} -DUV_HEIGHT={H // 2} -DRGB_SIZE={W * H} -DCL_DEBUG "

        # TODO: move rgb_to_yuv.cl to local dir once the frame stream camera is removed
        kernel_fn = os.path.join(BASEDIR, "system", "camerad", "transforms",
                                 "rgb_to_yuv.cl")
        with open(kernel_fn) as f:
            prg = cl.Program(self.ctx, f.read()).build(cl_arg)
            self.krnl = prg.rgb_to_yuv
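        # round W and H up to the next multiple of 4 before dividing by 4; used as the kernel's global work size below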
        self.Wdiv4 = W // 4 if (W % 4 == 0) else (W + (4 - W % 4)) // 4
        self.Hdiv4 = H // 4 if (H % 4 == 0) else (H + (4 - H % 4)) // 4

    def cam_callback_road(self, image):
        self._cam_callback(image, self.frame_road_id, 'roadCameraState',
                           VisionStreamType.VISION_STREAM_ROAD)
        self.frame_road_id += 1

    def cam_callback_wide_road(self, image):
        self._cam_callback(image, self.frame_wide_id, 'wideRoadCameraState',
                           VisionStreamType.VISION_STREAM_WIDE_ROAD)
        self.frame_wide_id += 1

    def _cam_callback(self, image, frame_id, pub_type, yuv_type):
        img = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
        img = np.reshape(img, (H, W, 4))
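        # keep only the first three channels, dropping the alpha channel of the 4-channel frame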
        img = img[:, :, [0, 1, 2]].copy()

        # convert RGB frame to YUV
        rgb = np.reshape(img, (H, W * 3))
        rgb_cl = cl_array.to_device(self.queue, rgb)
        yuv_cl = cl_array.empty_like(rgb_cl)
        self.krnl(self.queue, (np.int32(self.Wdiv4), np.int32(self.Hdiv4)),
                  None, rgb_cl.data, yuv_cl.data).wait()
        yuv = np.resize(yuv_cl.get(), rgb.size // 2)
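        # timestamps assume a fixed 20 Hz frame rate (0.05 s per frame)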
        eof = int(frame_id * 0.05 * 1e9)

        self.vipc_server.send(yuv_type, yuv.data.tobytes(), frame_id, eof, eof)

        dat = messaging.new_message(pub_type)
        msg = {
            "frameId": frame_id,
            "transform": [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        }
        setattr(dat, pub_type, msg)
        pm.send(pub_type, dat)
Example #2
def replay_cameras(lr, frs, disable_tqdm=False):
  eon_cameras = [
    ("roadCameraState", DT_MDL, eon_f_frame_size, VisionStreamType.VISION_STREAM_ROAD, True),
    ("driverCameraState", DT_DMON, eon_d_frame_size, VisionStreamType.VISION_STREAM_DRIVER, False),
  ]
  tici_cameras = [
    ("roadCameraState", DT_MDL, tici_f_frame_size, VisionStreamType.VISION_STREAM_ROAD, True),
    ("driverCameraState", DT_MDL, tici_d_frame_size, VisionStreamType.VISION_STREAM_DRIVER, False),
  ]

  def replay_camera(s, stream, dt, vipc_server, frames, size, use_extra_client):
    pm = messaging.PubMaster([s, ])
    rk = Ratekeeper(1 / dt, print_delay_threshold=None)

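    # default to a black YUV 4:2:0 frame (1.5 bytes per pixel) when no decoded frames are available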
    img = b"\x00" * int(size[0] * size[1] * 3 / 2)
    while True:
      if frames is not None:
        img = frames[rk.frame % len(frames)]

      rk.keep_time()

      m = messaging.new_message(s)
      msg = getattr(m, s)
      msg.frameId = rk.frame
      msg.timestampSof = m.logMonoTime
      msg.timestampEof = m.logMonoTime
      pm.send(s, m)

      vipc_server.send(stream, img, msg.frameId, msg.timestampSof, msg.timestampEof)
      if use_extra_client:
        vipc_server.send(VisionStreamType.VISION_STREAM_WIDE_ROAD, img, msg.frameId, msg.timestampSof, msg.timestampEof)

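  # pick the camera set based on the device type recorded in the log's initData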
  init_data = [m for m in lr if m.which() == 'initData'][0]
  cameras = tici_cameras if (init_data.initData.deviceType == 'tici') else eon_cameras

  # init vipc server and cameras
  p = []
  vs = VisionIpcServer("camerad")
  for (s, dt, size, stream, use_extra_client) in cameras:
    fr = frs.get(s, None)

    frames = None
    if fr is not None:
      print(f"Decompressing frames {s}")
      frames = []
      for i in tqdm(range(fr.frame_count), disable=disable_tqdm):
        img = fr.get(i, pix_fmt='nv12')[0]
        frames.append(img.flatten().tobytes())

    vs.create_buffers(stream, 40, False, size[0], size[1])
    if use_extra_client:
      vs.create_buffers(VisionStreamType.VISION_STREAM_WIDE_ROAD, 40, False, size[0], size[1])
    p.append(multiprocessing.Process(target=replay_camera,
                                     args=(s, stream, dt, vs, frames, size, use_extra_client)))

  vs.start_listener()
  return vs, p
Example #3
    def setUp(self):
        self.vipc_server = VisionIpcServer("camerad")
        self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_ROAD,
                                        40, False, *tici_f_frame_size)
        self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_DRIVER,
                                        40, False, *tici_f_frame_size)
        self.vipc_server.create_buffers(
            VisionStreamType.VISION_STREAM_WIDE_ROAD, 40, False,
            *tici_f_frame_size)
        self.vipc_server.start_listener()

        self.sm = messaging.SubMaster(['modelV2', 'cameraOdometry'])
        self.pm = messaging.PubMaster([
            'roadCameraState', 'wideRoadCameraState', 'liveCalibration',
            'lateralPlan'
        ])

        managed_processes['modeld'].start()
        time.sleep(0.2)
        self.sm.update(1000)
Example #4
  def __init__(self):
    self.frame_road_id = 0
    self.frame_wide_id = 0
    self.vipc_server = VisionIpcServer("camerad")

    self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_ROAD, 5, False, W, H)
    self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_WIDE_ROAD, 5, False, W, H)
    self.vipc_server.start_listener()

    # set up for pyopencl rgb to yuv conversion
    self.ctx = cl.create_some_context()
    self.queue = cl.CommandQueue(self.ctx)
    cl_arg = f" -DHEIGHT={H} -DWIDTH={W} -DRGB_STRIDE={W * 3} -DUV_WIDTH={W // 2} -DUV_HEIGHT={H // 2} -DRGB_SIZE={W * H} -DCL_DEBUG "

    # TODO: move rgb_to_yuv.cl to local dir once the frame stream camera is removed
    kernel_fn = os.path.join(BASEDIR, "selfdrive", "camerad", "transforms", "rgb_to_yuv.cl")
    with open(kernel_fn) as f:
      prg = cl.Program(self.ctx, f.read()).build(cl_arg)
      self.krnl = prg.rgb_to_yuv
    self.Wdiv4 = W // 4 if (W % 4 == 0) else (W + (4 - W % 4)) // 4
    self.Hdiv4 = H // 4 if (H % 4 == 0) else (H + (4 - H % 4)) // 4
Example #5
  def test_rotation(self):
    os.environ["LOGGERD_TEST"] = "1"
    Params().put("RecordFront", "1")

    expected_files = {"rlog", "qlog", "qcamera.ts", "fcamera.hevc", "dcamera.hevc", "ecamera.hevc"}
    streams = [(VisionStreamType.VISION_STREAM_ROAD, (*tici_f_frame_size, 2048*2346, 2048, 2048*1216), "roadCameraState"),
               (VisionStreamType.VISION_STREAM_DRIVER, (*tici_d_frame_size, 2048*2346, 2048, 2048*1216), "driverCameraState"),
               (VisionStreamType.VISION_STREAM_WIDE_ROAD, (*tici_e_frame_size, 2048*2346, 2048, 2048*1216), "wideRoadCameraState")]

    pm = messaging.PubMaster(["roadCameraState", "driverCameraState", "wideRoadCameraState"])
    vipc_server = VisionIpcServer("camerad")
    for stream_type, frame_spec, _ in streams:
      vipc_server.create_buffers_with_sizes(stream_type, 40, False, *(frame_spec))
    vipc_server.start_listener()

    for _ in range(5):
      num_segs = random.randint(2, 5)
      length = random.randint(1, 3)
      os.environ["LOGGERD_SEGMENT_LENGTH"] = str(length)
      managed_processes["loggerd"].start()
      managed_processes["encoderd"].start()

      fps = 20.0
      for n in range(1, int(num_segs*length*fps)+1):
        for stream_type, frame_spec, state in streams:
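          # frame contents are irrelevant for this test; only the buffer size needs to match the spec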
          dat = np.empty(frame_spec[2], dtype=np.uint8)
          vipc_server.send(stream_type, dat[:].flatten().tobytes(), n, n/fps, n/fps)

          camera_state = messaging.new_message(state)
          frame = getattr(camera_state, state)
          frame.frameId = n
          pm.send(state, camera_state)
        time.sleep(1.0/fps)

      managed_processes["loggerd"].stop()
      managed_processes["encoderd"].stop()

      route_path = str(self._get_latest_log_dir()).rsplit("--", 1)[0]
      for n in range(num_segs):
        p = Path(f"{route_path}--{n}")
        logged = {f.name for f in p.iterdir() if f.is_file()}
        diff = logged ^ expected_files
        self.assertEqual(len(diff), 0, f"didn't get all expected files. run={_} seg={n} {route_path=}, {diff=}\n{logged=} {expected_files=}")
Example #6
                   process_latency + network_latency + pc_latency),
                len(evta.data), sock_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Decode video streams and broadcast on VisionIPC")
    parser.add_argument("addr", help="Address of comma three")
    parser.add_argument("--nvidia",
                        action="store_true",
                        help="Use nvidia instead of ffmpeg")
    parser.add_argument("--cams", default="0,1,2", help="Cameras to decode")
    args = parser.parse_args()

    all_cams = [
        ("roadEncodeData", VisionStreamType.VISION_STREAM_ROAD),
        ("wideRoadEncodeData", VisionStreamType.VISION_STREAM_WIDE_ROAD),
        ("driverEncodeData", VisionStreamType.VISION_STREAM_DRIVER),
    ]
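    # map the selected indices onto (encode socket name, vision stream type) pairs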
    cams = dict([all_cams[int(x)] for x in args.cams.split(",")])

    vipc_server = VisionIpcServer("camerad")
    for vst in cams.values():
        vipc_server.create_buffers(vst, 4, False, W, H)
    vipc_server.start_listener()

    for k, v in cams.items():
        multiprocessing.Process(target=decoder,
                                args=(args.addr, k, vipc_server, v,
                                      args.nvidia)).start()
Example #7
class TestModeld(unittest.TestCase):
    def setUp(self):
        self.vipc_server = VisionIpcServer("camerad")
        self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_ROAD,
                                        40, False, *tici_f_frame_size)
        self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_DRIVER,
                                        40, False, *tici_f_frame_size)
        self.vipc_server.create_buffers(
            VisionStreamType.VISION_STREAM_WIDE_ROAD, 40, False,
            *tici_f_frame_size)
        self.vipc_server.start_listener()

        self.sm = messaging.SubMaster(['modelV2', 'cameraOdometry'])
        self.pm = messaging.PubMaster([
            'roadCameraState', 'wideRoadCameraState', 'liveCalibration',
            'lateralPlan'
        ])

        managed_processes['modeld'].start()
        time.sleep(0.2)
        self.sm.update(1000)

    def tearDown(self):
        managed_processes['modeld'].stop()
        del self.vipc_server

    def _send_frames(self, frame_id, cams=None):
        if cams is None:
            cams = ('roadCameraState', 'wideRoadCameraState')

        cs = None
        for cam in cams:
            msg = messaging.new_message(cam)
            cs = getattr(msg, cam)
            cs.frameId = frame_id
            cs.timestampSof = int((frame_id * DT_MDL) * 1e9)
            cs.timestampEof = int(cs.timestampSof + (DT_MDL * 1e9))

            self.pm.send(msg.which(), msg)
            self.vipc_server.send(VIPC_STREAM[msg.which()], IMG_BYTES,
                                  cs.frameId, cs.timestampSof, cs.timestampEof)
        return cs

    def _wait(self):
        self.sm.update(5000)
        if self.sm['modelV2'].frameId != self.sm['cameraOdometry'].frameId:
            self.sm.update(1000)

    def test_modeld(self):
        for n in range(1, 500):
            cs = self._send_frames(n)
            self._wait()

            mdl = self.sm['modelV2']
            self.assertEqual(mdl.frameId, n)
            self.assertEqual(mdl.frameIdExtra, n)
            self.assertEqual(mdl.timestampEof, cs.timestampEof)
            self.assertEqual(mdl.frameAge, 0)
            self.assertEqual(mdl.frameDropPerc, 0)

            odo = self.sm['cameraOdometry']
            self.assertEqual(odo.frameId, n)
            self.assertEqual(odo.timestampEof, cs.timestampEof)

    def test_dropped_frames(self):
        """
      modeld should only run on consecutive road frames
    """
        frame_id = -1
        road_frames = list()
        for n in range(1, 50):
            if (random.random() < 0.1) and n > 3:
                cams = random.choice([(), ('wideRoadCameraState', )])
                self._send_frames(n, cams)
            else:
                self._send_frames(n)
                road_frames.append(n)
            self._wait()

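            # modeld only runs when the last two road frames are consecutive, so track the frameId it is expected to report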
            if len(road_frames) < 3 or road_frames[-1] - road_frames[-2] == 1:
                frame_id = road_frames[-1]

            mdl = self.sm['modelV2']
            odo = self.sm['cameraOdometry']
            self.assertEqual(mdl.frameId, frame_id)
            self.assertEqual(mdl.frameIdExtra, frame_id)
            self.assertEqual(odo.frameId, frame_id)
            if n != frame_id:
                self.assertFalse(self.sm.updated['modelV2'])
                self.assertFalse(self.sm.updated['cameraOdometry'])
Example #8
def model_replay(lr, frs):
    spinner = Spinner()
    spinner.update("starting model replay")

    vipc_server = VisionIpcServer("camerad")
    vipc_server.create_buffers(
        VisionStreamType.VISION_STREAM_ROAD, 40, False,
        *(tici_f_frame_size if TICI else eon_f_frame_size))
    vipc_server.create_buffers(
        VisionStreamType.VISION_STREAM_DRIVER, 40, False,
        *(tici_d_frame_size if TICI else eon_d_frame_size))
    vipc_server.create_buffers(VisionStreamType.VISION_STREAM_WIDE_ROAD, 40,
                               False, *(tici_f_frame_size))
    vipc_server.start_listener()

    sm = messaging.SubMaster(['modelV2', 'driverState'])
    pm = messaging.PubMaster([
        'roadCameraState', 'wideRoadCameraState', 'driverCameraState',
        'liveCalibration', 'lateralPlan'
    ])

    try:
        managed_processes['modeld'].start()
        managed_processes['dmonitoringmodeld'].start()
        time.sleep(5)
        sm.update(1000)

        log_msgs = []
        last_desire = None
        recv_cnt = defaultdict(int)
        frame_idxs = defaultdict(int)

        # init modeld with valid calibration
        cal_msgs = [msg for msg in lr if msg.which() == "liveCalibration"]
        for _ in range(5):
            pm.send(cal_msgs[0].which(), cal_msgs[0].as_builder())
            time.sleep(0.1)

        msgs = defaultdict(list)
        for msg in lr:
            msgs[msg.which()].append(msg)

        for cam_msgs in zip_longest(msgs['roadCameraState'],
                                    msgs['wideRoadCameraState'],
                                    msgs['driverCameraState']):
            # need a pair of road/wide msgs
            if TICI and None in (cam_msgs[0], cam_msgs[1]):
                break

            for msg in cam_msgs:
                if msg is None:
                    continue

                if SEND_EXTRA_INPUTS:
                    if msg.which() == "liveCalibration":
                        last_calib = list(msg.liveCalibration.rpyCalib)
                        pm.send(msg.which(), replace_calib(msg, last_calib))
                    elif msg.which() == "lateralPlan":
                        last_desire = msg.lateralPlan.desire
                        dat = messaging.new_message('lateralPlan')
                        dat.lateralPlan.desire = last_desire
                        pm.send('lateralPlan', dat)

                if msg.which() in VIPC_STREAM:
                    msg = msg.as_builder()
                    camera_state = getattr(msg, msg.which())
                    img = frs[msg.which()].get(frame_idxs[msg.which()],
                                               pix_fmt="nv12")[0]
                    frame_idxs[msg.which()] += 1

                    # send camera state and frame
                    camera_state.frameId = frame_idxs[msg.which()]
                    pm.send(msg.which(), msg)
                    vipc_server.send(VIPC_STREAM[msg.which()],
                                     img.flatten().tobytes(),
                                     camera_state.frameId,
                                     camera_state.timestampSof,
                                     camera_state.timestampEof)

                    recv = None
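                    # on TICI, expect a modelV2 response only after both the road and wide road frames for this index have been sent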
                    if msg.which() in ('roadCameraState',
                                       'wideRoadCameraState'):
                        if not TICI or min(frame_idxs['roadCameraState'],
                                           frame_idxs['wideRoadCameraState']
                                           ) > recv_cnt['modelV2']:
                            recv = "modelV2"
                    elif msg.which() == 'driverCameraState':
                        recv = "driverState"

                    # wait for a response
                    with Timeout(15, f"timed out waiting for {recv}"):
                        if recv:
                            recv_cnt[recv] += 1
                            log_msgs.append(messaging.recv_one(sm.sock[recv]))

                    spinner.update(
                        "replaying models:  road %d/%d,  driver %d/%d" %
                        (frame_idxs['roadCameraState'],
                         frs['roadCameraState'].frame_count,
                         frame_idxs['driverCameraState'],
                         frs['driverCameraState'].frame_count))

            if any(frame_idxs[c] >= frs[c].frame_count
                   for c in frame_idxs.keys()):
                break

    finally:
        spinner.close()
        managed_processes['modeld'].stop()
        managed_processes['dmonitoringmodeld'].stop()

    return log_msgs