def search_vfs_single(data):
    # Decode a single one-frame ROI and test it against the target color.
    name, fps, target_color, threshold, start_time, confidence, y1, x1, y2, x2 = data
    end_time = start_time + (1 / fps)

    with engine.VFS(transient=True, child_process=True):
        reconstruction.POOL_SIZE = 1
        with api.read(name, t=(start_time, end_time),
                      roi=(y1, x1, y2, x2), codec='rgb') as stream:
            buffer = np.frombuffer(stream.read(), dtype=np.uint8)
            frame = buffer.reshape(y2 - y1, x2 - x1, 3)

    if is_dominant_color(frame, target_color, threshold):
        return start_time, confidence, y1, x1, y2, x2
    else:
        return None
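# `is_dominant_color` is referenced above but not defined in this file. A
# minimal sketch of what it might look like, assuming "dominant" means the
# mean pixel color of the ROI lies within `threshold` (Euclidean distance in
# RGB space) of `target_color`; the real helper may use a different metric.
def is_dominant_color(frame, target_color, threshold):
    mean_color = frame.reshape(-1, 3).mean(axis=0)
    return np.linalg.norm(mean_color - target_color) <= threshold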
def index_vfs(name, duration, fps, interval, results_filename):
    model, names = load_model()
    target_class = names.index('car')
    read_parameters = []
    count = 0
    model_resolution = 540, 540

    with engine.VFS(transient=True), open(results_filename, "w") as f:
        reconstruction.POOL_SIZE = 1

        # Sample `interval` one-frame reads per second over the full duration.
        for index in range(duration * interval):
            start_time = index * (1 / interval)
            end_time = start_time + (1 / fps)
            read_parameters.append((name, None, model_resolution, None,
                                    (start_time, end_time), 'rgb', None))

        for future in as_completed(
                api.readmany2i(*zip(*read_parameters), workers=16)):
            with open(future.result(), "rb") as stream:
                frame = np.frombuffer(stream.read(), dtype=np.uint8).reshape(
                    model_resolution[0], model_resolution[1], 3)
            start_time = read_parameters[future.index][4][0]
            for confidence, y1, x1, y2, x2 in inference(model, frame, target_class):
                f.write(f'{start_time},{confidence},{y1},{x1},{y2},{x2}\n')
                count += 1

    logging.critical('Index.VFS: %d events indexed', count)
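# `parse_index_file` (used by the search and stream stages below) is not
# defined in this file. A minimal sketch, assuming it simply parses the
# comma-separated rows written by index_vfs/search_vfs back into tuples:
def parse_index_file(filename):
    with open(filename) as f:
        for line in f:
            start_time, confidence, y1, x1, y2, x2 = line.strip().split(',')
            yield (float(start_time), float(confidence),
                   int(float(y1)), int(float(x1)),
                   int(float(y2)), int(float(x2)))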
def evaluate_quality(left_name, right_name, reference_directory,
                     reference_left_filename, reference_right_filename):
    with engine.VFS():
        logical_left = logicalvideo.LogicalVideo.get_by_name(left_name)
        physical_left = list(logical_left.videos())[0]
        gops_left = physical_left.gops()

        separate_psnr, overlap_psnr = 0, 0
        separate_count, overlap_count = 0, 0

        # GOP filenames containing '{}' indicate jointly-compressed (overlapping)
        # storage; all other GOPs are stored separately.
        for gop in gops_left:
            if '{}' not in gop.filename:
                left_separate_psnr, left_count = evaluate_quality_separate(
                    gop, gop.filename, reference_directory)
                separate_psnr += left_separate_psnr
                separate_count += left_count
            else:
                left_separate_psnr, left_overlap_psnr, left_count = evaluate_quality_overlap_left(
                    gop, reference_directory)
                separate_psnr += left_separate_psnr
                overlap_psnr += left_overlap_psnr
                overlap_count += left_count
                separate_count += left_count

        logical_right = logicalvideo.LogicalVideo.get_by_name(right_name)
        physical_right = list(logical_right.videos())[0]
        gops_right = physical_right.gops()

        for gop in gops_right:
            if '{}' not in gop.filename:
                right_separate_psnr, right_count = evaluate_quality_separate(
                    gop, gop.filename, reference_directory)
                separate_psnr += right_separate_psnr
                separate_count += right_count
            else:
                right_separate_psnr, right_overlap_psnr, right_count = evaluate_quality_overlap_right(
                    gop, reference_directory)
                separate_psnr += right_separate_psnr
                overlap_psnr += right_overlap_psnr
                overlap_count += right_count
                separate_count += right_count

        print(f'Overall separate: {separate_psnr / separate_count:.2f}, '
              f'overlap: {overlap_psnr / overlap_count:.2f}')

        ssim_left = evaluate_ssim(left_name, reference_left_filename,
                                  source_filename='out_left.mp4')
        ssim_right = evaluate_ssim(right_name, reference_right_filename,
                                   source_filename='out_right.mp4')

        return (separate_psnr / separate_count, overlap_psnr / overlap_count,
                ssim_left, ssim_right)
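# For reference, the PSNR presumably accumulated inside evaluate_quality_separate
# and evaluate_quality_overlap_* (which are not shown in this file) follows the
# standard mean-squared-error formulation over 8-bit frames. A hypothetical
# sketch, assuming numpy frames:
import numpy as np

def psnr(reference_frame, decoded_frame):
    mse = np.mean((reference_frame.astype(np.float64) -
                   decoded_frame.astype(np.float64)) ** 2)
    if mse == 0:
        return float('inf')  # frames are identical
    return 10 * np.log10((255 ** 2) / mse)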
def create_cache(n, name, T, R, P):
    # Generate n random reads and log them to cache.txt so the workload can be
    # replayed. (The file must be opened once, outside the loop; opening it in
    # 'w' mode per iteration would truncate all but the last entry.)
    with engine.VFS(transient=True), open('cache.txt', 'w') as f:
        for i in range(n):
            r = random.choice(R)
            p = random.choice(P)
            t1 = random.randint(0, T - 1)
            t2 = min(random.randint(t1 + 1, t1 + 60), T - 1)

            f.write(f'{i} cache {name} {(t1, t2)} {r} {p}\n')
            print(f'{i} cache {name} {(t1, t2)} {r} {p}')

            api.read(name, f'out.{p}', resolution=r, t=(t1, t2), codec=p)
def stream_vfs(name, resolution, fps, index_filename):
    prefix = uuid.uuid4().hex
    read_parameters = []

    with engine.VFS(transient=True):
        hits = parse_index_file(index_filename)

        # Issue one batched read per contiguous group of hit frames.
        for index, (start, end) in enumerate(group_by_frames(hits, fps)):
            read_parameters.append((name, f'vfsout-{prefix}-{index}.mp4',
                                    resolution, None, (start, end), 'h264', None))

        wait(list(api.readmany2(*zip(*read_parameters),
                                workers=min(len(read_parameters), 10))))
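# `group_by_frames` is not defined in this file. A minimal sketch, assuming it
# coalesces index hits at adjacent frame timestamps into contiguous
# (start, end) intervals so each interval can be decoded with a single read:
def group_by_frames(hits, fps):
    epsilon = 1.5 / fps  # tolerate up to one frame of slack between hits
    start = end = None
    for start_time, *_ in sorted(hits):
        if start is None:
            start, end = start_time, start_time + 1 / fps
        elif start_time - end <= epsilon:
            end = start_time + 1 / fps
        else:
            yield start, end
            start, end = start_time, start_time + 1 / fps
    if start is not None:
        yield start, end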
def temp():
    # Scratch helper: sequentially decode every indexed ROI once, then bail out.
    name = 'v'
    fps = 30

    with log_runtime('Search.VFS', level=logging.CRITICAL):
        with engine.VFS(transient=True):
            for start_time, confidence, y1, x1, y2, x2 in parse_index_file(
                    'index_cars_vfs.csv'):
                end_time = start_time + (1 / fps)
                api.read(name, 'foo.mp4', t=(start_time, end_time),
                         roi=(y1, x1, y2, x2), codec='rgb')
    exit(1)
    # Retry if the GOP's backing file is missing or the GOP is excluded.
    if not os.path.exists(gop.filename):
        return get_random_gop(engine, name, r, t, index, avoid_ids)
    elif gop.id in (avoid_ids or []):
        return get_random_gop(engine, name, r, t, index, avoid_ids)
    else:
        return gop


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    n = 50
    mt = 0.0

    # Assumes 'v1' and 'v2' were previously ingested (e.g., via
    # api.write('v1', 'inputs/visualroad-4k-30a-gop30.mp4')).
    with engine.VFS(transient=True) as instance:
        api.vacuum()

        for i in range(n):
            r = 540, 960  # alternatives: (1080, 1920) or (2160, 3840)
            gop1 = get_random_gop(instance, 'v1', r, 1, i)
            gop2 = get_random_gop(instance, 'v2', r, 1, i, [gop1.id])

            # Compression
        duration = time.time() - start_time
        print(f'{i} naive duration {duration:.2f}')


def read_random(i, reader, source, r, t, p):
    # Read one second of video starting at t with the given resolution/codec.
    reader(i, source, r, (t, t + 1), p)


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    n = int(sys.argv[1]) if len(sys.argv) > 1 else 1

    R = [(540, 960), (1080, 1920), (2160, 3840)]
    P = ['h264', 'hevc']
    T = (0, 3600)

    with engine.VFS(transient=True):
        api.vacuum()
        # create_cache(500, 'v', 3600, R, P)  # one-time workload generation

        n = 50  # overrides any command-line value for this experiment
        ts = [random.randint(*T) for _ in range(n)]
        ps = [random.choice(P) for _ in range(n)]
        rs = [random.choice(R) for _ in range(n)]

        apply_deferred_compression = False
        apply_eviction = True
def search_vfs(name, fps, target_color, threshold, index_filename, result_filename):
    count = 0
    read_parameters = []
    futures = []

    for start_time, confidence, y1, x1, y2, x2 in parse_index_file(index_filename):
        read_parameters.append((name, None, None, (y1, x1, y2, x2),
                                (start_time, start_time + (1 / fps)), 'rgb', None))

    with ProcessPoolExecutor(max_workers=4) as pool, engine.VFS(
            transient=True, child_process=True):
        # Chain each ROI decode onto a pool worker that tests it for the
        # target dominant color.
        for future in api.readmany2i(*zip(*read_parameters), workers=32):
            futures.append(chain(pool, future, compute_dominant_color,
                                 read_parameters[future.index],
                                 target_color, threshold))

        with open(result_filename, 'w') as f:
            for future in futures:
                result = future.result()
                if result is not None:
                    start_time, confidence, y1, x1, y2, x2 = result
                    f.write(f'{start_time},{confidence},{y1},{x1},{y2},{x2}\n')
                    count += 1

    logging.critical('Search.VFS: %d events indexed', count)
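# Neither `chain` nor `compute_dominant_color` is defined in this file. A
# hypothetical sketch of both, assuming `chain` submits a continuation to the
# process pool once the VFS read future resolves (a real implementation might
# use add_done_callback instead of blocking on result()), and
# `compute_dominant_color` re-checks the decoded ROI against the target color:
def chain(pool, future, fn, *args):
    return pool.submit(fn, future.result(), *args)

def compute_dominant_color(filename, parameters, target_color, threshold):
    with open(filename, 'rb') as stream:
        buffer = np.frombuffer(stream.read(), dtype=np.uint8)
    start_time = parameters[4][0]
    y1, x1, y2, x2 = parameters[3]
    frame = buffer.reshape(y2 - y1, x2 - x1, 3)
    if is_dominant_color(frame, target_color, threshold):
        return start_time, 0, y1, x1, y2, x2
    return None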
def ingest_vfs(filename, name=None):
    # Ingest a video into VFS under `name` (default: a millisecond-timestamp name).
    with engine.VFS(transient=True):
        api.write(name or 'v%d' % int(time.time() * 1000), filename)
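# Example usage, with the 2K Visual Road input referenced in the driver below:
# ingest_vfs('inputs/visualroad-2k-30a.mp4', name='v')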
if __name__ == '__main__':
    logging.basicConfig(level=logging.CRITICAL)

    clients = 1
    ingest_filename = 'inputs/visualroad-2k-30a.mp4'
    duration = 3600
    fps = 30
    query_interval = 3
    target_color = np.array([127, 127, 127])
    color_threshold = 50
    resolution = (1080, 1920)

    with engine.VFS(transient=True) as instance:
        # api.vacuum()
        if 'v' not in api.list():
            api.write('v', ingest_filename)

        with ProcessPoolExecutor(max_workers=clients) as pool:
            #with log_runtime('Index.VFS', level=logging.CRITICAL):
            #    index_vfs('v', duration, fps, query_interval, 'index_cars_vfs.csv')
            #with log_runtime('Search.VFS', level=logging.CRITICAL):
            #    search_vfs('v', fps, target_color, color_threshold,
            #               'index_cars_vfs.csv', 'index_colors_vfs.csv')
            with log_runtime('Stream.VFS', level=logging.CRITICAL):
                stream_vfs('v', resolution, fps, 'index_colors_vfs.csv')
            #with log_runtime('Index.FS', level=logging.CRITICAL):
            #    index_fs(ingest_filename, duration, fps, query_interval, 'index_cars_fs.csv')
            #with log_runtime('Search.FS', level=logging.CRITICAL):
import logging

from vfs import api
from vfs import engine
from vfs.physicalvideo import PhysicalVideo
from vfs.rawcompression import compress
from vfs.videoio import encoded

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    t = (0, 12)
    level = 15

    # Assumes 'v' was previously ingested (e.g., via
    # api.write('v', 'inputs/visualroad-4k-30a.mp4')).

    # Compress every raw (non-encoded) GOP to the requested zstandard level
    # before timing the read below.
    if level is not None:
        with engine.VFS(transient=True):
            for physical in PhysicalVideo.get_all():
                if not encoded[physical.codec]:
                    for gop in physical.gops():
                        if gop.zstandard != level:
                            compress(gop.id, level)

    with engine.VFS(transient=True):
        api.read('v', '/dev/null', t=t, codec='rgb')