Example #1
def FrameReader(fn,
                cache_prefix=None,
                readahead=False,
                readbehind=False,
                multithreaded=True):
    """Return a frame reader suited to the container/codec of fn."""
    frame_type = fingerprint_video(fn)
    if frame_type == FrameType.raw:
        return RawFrameReader(fn)
    elif frame_type in (FrameType.h265_stream, FrameType.h264_pstream):
        index_data = get_video_index(fn, frame_type, cache_prefix)
        if index_data is not None and "predecom" in index_data:
            cache_path = cache_path_for_file_path(fn, cache_prefix)
            return MKVFrameReader(
                os.path.join(os.path.dirname(cache_path),
                             index_data["predecom"]))
        else:
            return StreamFrameReader(fn,
                                     frame_type,
                                     index_data,
                                     readahead=readahead,
                                     readbehind=readbehind,
                                     multithreaded=multithreaded)
    elif frame_type == FrameType.h264_mp4:
        return MP4FrameReader(fn, readahead=readahead)
    elif frame_type == FrameType.ffv1_mkv:
        return MKVFrameReader(fn)
    else:
        raise NotImplementedError(frame_type)
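
A minimal usage sketch. The segment path and the reader interface (frame_count, w, h, and a get() accessor) are assumptions, since the reader classes themselves are not shown in this example:

fr = FrameReader("fcamera.hevc", readahead=True)  # hypothetical path
print(fr.frame_count, fr.w, fr.h)                 # assumed reader attributes
frames = fr.get(0, 1, pix_fmt="yuv420p")          # assumed frame accessor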
Example #2
def index_video(fn, frame_type=None, cache_prefix=None):
    cache_path = cache_path_for_file_path(fn, cache_prefix)

    if os.path.exists(cache_path):
        return

    if frame_type is None:
        frame_type = fingerprint_video(fn)  # fn is a single path, not a list

    if frame_type == FrameType.h264_pstream:
        # hack: try to index the whole route now
        route = Route.from_file_path(fn)

        camera_paths = route.camera_paths()
        if fn not in camera_paths:
            raise DataUnreadableError(
                "Not a contiguous route camera file: {}".format(fn))

        print("no pstream cache for %s, indexing route %s now" %
              (fn, route.name))
        index_pstream(camera_paths, "h264", cache_prefix)
    elif frame_type == FrameType.h265_stream:
        index_stream(fn, "hevc", cache_prefix=cache_prefix)
    elif frame_type == FrameType.h264_mp4:
        index_mp4(fn, cache_prefix=cache_prefix)
Example #3
def get_video_index(fn, frame_type, cache_prefix=None):
  cache_path = cache_path_for_file_path(fn, cache_prefix)

  if not os.path.exists(cache_path):
    index_video(fn, frame_type, cache_prefix)

  if not os.path.exists(cache_path):
    return None
  with open(cache_path, "rb") as cache_file:
    return pickle.load(cache_file)
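
A short usage sketch; the path is hypothetical, and the dict keys mirror the ones pickled by index_pstream and pstream_predecompress below:

index_data = get_video_index("fcamera.hevc", FrameType.h265_stream)
if index_data is not None and "predecom" in index_data:
  print("pre-decompressed copy:", index_data["predecom"])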
Example #4
def index_video(fn, frame_type=None, cache_prefix=None):
    cache_path = cache_path_for_file_path(fn, cache_prefix)

    if os.path.exists(cache_path):
        return

    if frame_type is None:
        frame_type = fingerprint_video(fn)  # fn is a single path, not a list

    if frame_type == FrameType.h265_stream:
        index_stream(fn, "hevc", cache_prefix=cache_prefix)
    else:
        raise NotImplementedError("Only h265 supported")
Example #5
def cache_fn(func):  # outer decorator reconstructed so `func` is defined; name assumed
  def cache_inner(fn, *args, **kwargs):
    cache_prefix = kwargs.pop('cache_prefix', None)
    cache_path = cache_path_for_file_path(fn, cache_prefix)

    if cache_path and os.path.exists(cache_path):
      with open(cache_path, "rb") as cache_file:
        cache_value = pickle.load(cache_file)
    else:
      cache_value = func(fn, *args, **kwargs)

      if cache_path:
        with atomic_write_in_dir(cache_path, mode="wb", overwrite=True) as cache_file:
          pickle.dump(cache_value, cache_file, -1)

    return cache_value

  return cache_inner
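
With the outer decorator restored, a hedged usage sketch (the decorated function and the paths are hypothetical):

@cache_fn
def expensive_index(fn):
  return {"fn": fn}  # stand-in for real work; the result is pickled into the cache

# the first call computes and writes the cache; repeat calls load the pickle
result = expensive_index("fcamera.hevc", cache_prefix="/tmp/cache")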
Example #6
def index_pstream(fns, typ, cache_prefix=None):
  """Index a contiguous run of h264 pstream segments, writing one cache file per segment."""
  if typ != "h264":
    raise NotImplementedError(typ)

  if not fns:
    raise DataUnreadableError("chffr h264 requires contiguous files")

  out_fns = [cache_path_for_file_path(fn, cache_prefix) for fn in fns]
  out_exists = [os.path.exists(out_fn) for out_fn in out_fns]  # list (reused below), not a one-shot iterator
  if all(out_exists): return

  # load existing index files to avoid re-doing work
  existing_indexes = []
  for out_fn, exists in zip(out_fns, out_exists):
    existing = None
    if exists:
      with open(out_fn, "rb") as cache_file:
        existing = pickle.load(cache_file)
    existing_indexes.append(existing)

  # probe the first file
  if existing_indexes[0]:
    probe = existing_indexes[0]['probe']
  else:
    with FileReader(fns[0]) as f:
      probe = ffprobe(f.name, typ)

  global_prefix = None

  # get the video index of all the segments in this stream
  indexes = []
  for i, fn in enumerate(fns):
    if existing_indexes[i]:
      index = existing_indexes[i]['index']
      prefix = existing_indexes[i]['global_prefix']
    else:
      with FileReader(fn) as f:
        index, prefix = vidindex(f.name, typ)
    if i == 0:
      if not prefix:
        raise DataUnreadableError("vidindex failed for %s" % fn)
      global_prefix = prefix
    indexes.append(index)

  assert global_prefix

  if np.sum(indexes[0][:, 0] == H264_SLICE_I) <= 1:
    print("pstream %s is unseekable. pre-decompressing all the segments..." % (fns[0]))
    pstream_predecompress(fns, probe, indexes, global_prefix, cache_prefix)
    return

  # generate what's required to make each segment self-contained
  # (the partial GOP from the end of each segment is set aside to prepend
  #  to the start of the following segment)
  prefix_data = ["" for _ in fns]
  prefix_index = [[] for _ in fns]
  for i in range(len(fns)-1):
    if indexes[i+1][0, 0] == H264_SLICE_I and indexes[i+1][0, 1] <= 1:
      # next file happens to start with an I-frame; no need to carry over this file's end
      continue

    index = indexes[i]
    if i == 0 and np.sum(index[:, 0] == H264_SLICE_I) <= 1:
      raise NotImplementedError("No I-frames in pstream.")

    # find the last GOP in the index
    frame_b = len(index)-1
    while frame_b > 0 and index[frame_b, 0] != H264_SLICE_I:
      frame_b -= 1

    assert frame_b >= 0
    assert index[frame_b, 0] == H264_SLICE_I

    end_len = len(index)-frame_b

    with FileReader(fns[i]) as vid:
      vid.seek(index[frame_b, 1])
      end_data = vid.read()

    prefix_data[i+1] = end_data
    prefix_index[i+1] = index[frame_b:-1]
    # note: indexes[i] keeps its trailing partial GOP; the next segment skips those frames via num_prefix_frames

  for i, fn in enumerate(fns):
    cache_path = out_fns[i]

    if os.path.exists(cache_path):
      continue

    segment_index = {
      'index': indexes[i],
      'global_prefix': global_prefix,
      'probe': probe,
      'prefix_frame_data': prefix_data[i], # data to prefix the first GOP with
      'num_prefix_frames': len(prefix_index[i]), # number of frames to skip in the first GOP
    }

    with atomic_write_in_dir(cache_path, mode="wb", overwrite=True) as cache_file:
      pickle.dump(segment_index, cache_file, -1)
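
The backward while-loop that locates the last GOP above can be written as a single numpy lookup. A standalone sketch (the H264_SLICE_I value and index contents are invented for illustration; the equivalence holds whenever the segment contains at least one I-frame):

import numpy as np

H264_SLICE_I = 2  # hypothetical value, for this sketch only
# each row: (slice type, byte offset), like the vidindex output above
index = np.array([[2, 0], [0, 10], [0, 20], [2, 30], [0, 40]])

frame_b = np.where(index[:, 0] == H264_SLICE_I)[0][-1]
assert frame_b == 3  # same result as the backward scan in index_pstream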
Example #7
def pstream_predecompress(fns, probe, indexes, global_prefix, cache_prefix, multithreaded=False):
  """Decompress unseekable pstream segments into intra-only FFV1 MKVs for random access."""
  assert len(fns) == len(indexes)
  out_fns = [cache_path_for_file_path(fn, cache_prefix, extension=".predecom.mkv") for fn in fns]
  out_exists = [os.path.exists(out_fn) for out_fn in out_fns]  # list (reused below), not a one-shot iterator
  if all(out_exists):
    return

  w = probe['streams'][0]['width']
  h = probe['streams'][0]['height']

  frame_size = w*h*3//2  # yuv420p: full-res luma plane plus two quarter-res chroma planes

  decompress_proc = subprocess.Popen(
    ["ffmpeg",
     "-threads", "0" if multithreaded else "1",
     "-vsync", "0",
     "-f", "h264",
     "-i", "pipe:0",
     "-threads", "0" if multithreaded else "1",
     "-f", "rawvideo",
     "-pix_fmt", "yuv420p",
     "pipe:1"],
    stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)

  def write_thread():
    for fn in fns:
      with FileReader(fn) as f:
        decompress_proc.stdin.write(f.read())
    decompress_proc.stdin.close()

  def read_frame():
    frame = None
    try:
      frame = decompress_proc.stdout.read(frame_size)
    except (IOError, ValueError):
      pass
    if frame is None or frame == b"" or len(frame) != frame_size:
      raise DataUnreadableError("pre-decompression failed for %s" % fn)
    return frame

  t = threading.Thread(target=write_thread)
  t.daemon = True
  t.start()

  try:
    for fn, out_fn, out_exist, index in zip(fns, out_fns, out_exists, indexes):
      if out_exist:
        for _ in range(index.shape[0]-1):
          read_frame()
        continue

      with atomic_write_in_dir(out_fn, mode="w+b", overwrite=True) as out_tmp:
        compress_proc = subprocess.Popen(
          ["ffmpeg",
           "-threads", "0" if multithreaded else "1",
           "-y",
           "-vsync", "0",
           "-f", "rawvideo",
           "-pix_fmt", "yuv420p",
           "-s", "%dx%d" % (w, h),
           "-i", "pipe:0",
           "-threads", "0" if multithreaded else "1",
           "-f", "matroska",
           "-vcodec", "ffv1",
           "-g", "0",
           out_tmp.name],
          stdin=subprocess.PIPE, stderr=subprocess.DEVNULL)
        try:
          for _ in range(index.shape[0]-1):
            frame = read_frame()
            compress_proc.stdin.write(frame)
          compress_proc.stdin.close()
        except:
          compress_proc.kill()
          raise

        assert compress_proc.wait() == 0

      cache_path = cache_path_for_file_path(fn, cache_prefix)
      with atomic_write_in_dir(cache_path, mode="wb", overwrite=True) as cache_file:
        pickle.dump({
          'predecom': os.path.basename(out_fn),
          'index': index,
          'probe': probe,
          'global_prefix': global_prefix,
        }, cache_file, -1)

  except:
    decompress_proc.kill()
    raise
  finally:
    t.join()

  rc = decompress_proc.wait()
  if rc != 0:
    raise DataUnreadableError(fns[0])
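
The frame_size arithmetic above follows from yuv420p's 2x2 chroma subsampling; a worked check with hypothetical dimensions:

w, h = 1164, 874               # hypothetical frame dimensions
y_size = w * h                 # full-resolution luma plane
uv_size = (w // 2) * (h // 2)  # each chroma plane is quarter size
assert y_size + 2 * uv_size == w * h * 3 // 2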
Example #8
#!/usr/bin/env python3
import os
import numpy as np

from tools.lib.logreader import LogReader
from tools.lib.framereader import FrameReader
from tools.lib.cache import cache_path_for_file_path
from selfdrive.test.process_replay.camera_replay import camera_replay

if __name__ == "__main__":
    lr = LogReader(os.path.expanduser('~/rlog.bz2'))
    fr = FrameReader(os.path.expanduser('~/fcamera.hevc'))
    desire = np.load(os.path.expanduser('~/desire.npy'))
    calib = np.load(os.path.expanduser('~/calib.npy'))

    try:
        msgs = camera_replay(list(lr), fr, desire=desire, calib=calib)
    finally:
        # always remove the frame index cache created for fcamera.hevc during replay
        cache_path = cache_path_for_file_path(
            os.path.expanduser('~/fcamera.hevc'))
        if os.path.isfile(cache_path):
            os.remove(cache_path)

    output_size = len(np.frombuffer(msgs[0].model.rawPred, dtype=np.float32))
    output_data = np.zeros((len(msgs), output_size), dtype=np.float32)
    for i, msg in enumerate(msgs):
        output_data[i] = np.frombuffer(msg.model.rawPred, dtype=np.float32)
    np.save(os.path.expanduser('~/modeldata.npy'), output_data)

    print("Finished replay")