def main():
    print("Prepare videos and frames")
    video = Video.objects.filter(path__contains='wim')[0]
    video_path = video.path
    video_name = video.item_name()

    sc = Client()
    stride = 1

    input_stream = NamedVideoStream(sc, video_name, path=video_path)
    frame = sc.io.Input([input_stream])

    # run on all frames
    # running_frame = sc.streams.Stride(frame, [stride])

    hit_annotation = pickle.load(
        open('/app/data/pkl/hit_annotation_tennis.pkl', 'rb'))[video_name + '.mp4']

    # run on selected frames
    # hit_dict = []
    # for h in hit_annotation.values():
    #     hit_dict += h
    hit_dict = hit_annotation
    # frame_ids = [i for point in hit_dict for i in range(point[0]['fid']-25, point[-1]['fid']+25)]
    frame_ids = [
        i for point in hit_dict for i in range(point[0] - 25, point[-1] + 25)
    ]
    frame_ids.sort()
    running_frame = sc.streams.Gather(frame, [frame_ids])

    print('Running Scanner DensePose op on %d frames' % len(frame_ids))
    densepose_frame = sc.ops.DensePoseDetectPerson(frame=running_frame,
                                                   device=DeviceType.GPU,
                                                   batch=1,
                                                   confidence_threshold=0.5,
                                                   nms_threshold=0.2)
    densepose_stream = NamedStream(sc, video_name + '_densepose')
    output_op = sc.io.Output(densepose_frame, [densepose_stream])
    sc.run(output_op,
           scannerpy.common.PerfParams.estimate(),
           cache_mode=scannerpy.CacheMode.Overwrite)

    # NOTE: exit() stops the script here; the drawing passes below are skipped.
    exit()

    print('Writing DensePose metadata into frames')
    drawn_frame = sc.ops.DrawDensePose(frame=running_frame,
                                       bundled_data=sc.io.Input([densepose_stream]),
                                       min_score_thresh=0.5,
                                       show_body=True)
    drawn_stream = NamedVideoStream(sc, video_name + '_densepose_draw_uvbody')
    output_op = sc.io.Output(drawn_frame, [drawn_stream])
    sc.run(output_op,
           scannerpy.common.PerfParams.estimate(),
           cache_mode=scannerpy.CacheMode.Overwrite)
    drawn_stream.save_mp4('/app/result/' + video_name + '_densepose_uvbody')

    drawn_frame = sc.ops.DrawDensePose(frame=running_frame,
                                       bundled_data=sc.io.Input([densepose_stream]),
                                       min_score_thresh=0.5,
                                       show_body=False)
    drawn_stream = NamedVideoStream(sc, video_name + '_densepose_draw_full')
    output_op = sc.io.Output(drawn_frame, [drawn_stream])
    sc.run(output_op,
           scannerpy.common.PerfParams.estimate(),
           cache_mode=scannerpy.CacheMode.Overwrite)
    drawn_stream.save_mp4('/app/result/' + video_name + '_densepose_full')

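
# Optional verification sketch (not part of the original script): after the
# DensePoseDetectPerson pass in main(), the stored NamedStream can be read back
# to confirm that one output element exists per gathered frame. This assumes
# the '<video_name>_densepose' stream name used above and that
# NamedStream.load() yields the serialized per-frame detections.
def check_densepose_output(sc, video_name):
    densepose_stream = NamedStream(sc, video_name + '_densepose')
    num_elements = sum(1 for _ in densepose_stream.load())
    print('stored %d DensePose elements for %s' % (num_elements, video_name))
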
def test_multiple_outputs(cl):
    sampler = cl.streams.Range

    def run_job(args_1, args_2):
        frame = cl.io.Input([NamedVideoStream(cl, 'test1')])
        sample_frame_1 = cl.streams.Range(input=frame, ranges=[args_1])
        sample_frame_2 = cl.streams.Range(input=frame, ranges=[args_2])
        output_op_1 = cl.io.Output(sample_frame_1,
                                   [NamedVideoStream(cl, 'test_mp_1')])
        output_op_2 = cl.io.Output(sample_frame_2,
                                   [NamedVideoStream(cl, 'test_mp_2')])
        cl.run([output_op_1, output_op_2],
               PerfParams.estimate(),
               cache_mode=CacheMode.Overwrite,
               show_progress=False)

    # This should fail
    sampler_args_1 = {'start': 0, 'end': 30}
    sampler_args_2 = {'start': 0, 'end': 15}
    exc = False
    try:
        run_job(sampler_args_1, sampler_args_2)
    except ScannerException:
        exc = True
    assert exc

    # This should succeed
    sampler_args_1 = {'start': 0, 'end': 30}
    expected_rows_1 = 30
    sampler_args_2 = {'start': 30, 'end': 60}
    expected_rows_2 = 30
    run_job(sampler_args_1, sampler_args_2)

    num_rows = 0
    for _ in cl.table('test_mp_1').column('frame').load():
        num_rows += 1
    assert num_rows == expected_rows_1

    num_rows = 0
    for _ in cl.table('test_mp_2').column('frame').load():
        num_rows += 1
    assert num_rows == expected_rows_2

    # This should succeed
    frame = cl.io.Input([NamedVideoStream(cl, 'test1')])
    sample_frame_1 = cl.streams.Range(input=frame, ranges=[sampler_args_1])
    output_op_1 = cl.io.Output(sample_frame_1,
                               [NamedVideoStream(cl, 'test_mp_1')])
    output_op_2 = cl.io.Output(sample_frame_1,
                               [NamedVideoStream(cl, 'test_mp_2')])
    cl.run([output_op_1, output_op_2],
           PerfParams.estimate(),
           cache_mode=CacheMode.Overwrite,
           show_progress=False)

    num_rows = 0
    for _ in cl.table('test_mp_1').column('frame').load():
        num_rows += 1
    assert num_rows == expected_rows_1

def test_load_video_column(cl):
    for name in ['test1', 'test1_inplace']:
        next(NamedVideoStream(cl, name).load())

def test_gather_video_column(cl):
    for name in ['test1', 'test1_inplace']:
        # Gather rows
        rows = [0, 10, 100, 200]
        frames = list(NamedVideoStream(cl, name).load(rows=rows))
        assert len(frames) == len(rows)

def test_fault_tolerance(fault_cl):
    force_kill_spawn_port = 5012
    normal_spawn_port = 5013

    def worker_killer_task(config, master_address):
        from scannerpy import Config, start_worker, protobufs
        import time
        import grpc
        import subprocess
        import signal
        import os

        import scanner.metadata_pb2 as metadata_types
        import scanner.engine.rpc_pb2 as rpc_types
        import scanner.types_pb2 as misc_types

        # Spawn a worker that we will force kill
        script_dir = os.path.dirname(os.path.realpath(__file__))
        with open(os.devnull, 'w') as fp:
            p = subprocess.Popen([
                'python3 ' + script_dir +
                '/spawn_worker.py {:d}'.format(force_kill_spawn_port)
            ],
                                 shell=True,
                                 stdout=fp,
                                 stderr=fp,
                                 preexec_fn=os.setsid)

            # Wait a bit for the worker to do its thing
            time.sleep(10)

            # Force kill worker process to trigger fault tolerance
            os.killpg(os.getpgid(p.pid), signal.SIGTERM)
            p.kill()
            p.communicate()

            # Wait for fault tolerance to kick in
            time.sleep(15)

            # Spawn the worker again
            subprocess.call([
                'python3 ' + script_dir +
                '/spawn_worker.py {:d}'.format(normal_spawn_port)
            ],
                            shell=True)

    master_addr = fault_cl._master_address
    killer_process = Process(target=worker_killer_task,
                             args=(fault_cl.config, master_addr))
    killer_process.daemon = True
    killer_process.start()

    input = NamedVideoStream(fault_cl, 'test1')
    frame = fault_cl.io.Input([input])
    range_frame = fault_cl.streams.Range(frame, ranges=[{
        'start': 0,
        'end': 20
    }])
    sleep_frame = fault_cl.ops.SleepFrame(ignore=range_frame)
    output = NamedStream(fault_cl, 'test_fault')
    output_op = fault_cl.io.Output(sleep_frame, [output])

    fault_cl.run(output_op,
                 PerfParams.estimate(pipeline_instances_per_node=1),
                 cache_mode=CacheMode.Overwrite,
                 show_progress=False)

    assert output.len() == 20

    # Shutdown the spawned worker
    channel = grpc.insecure_channel('localhost:' + str(normal_spawn_port),
                                    options=[('grpc.max_message_length',
                                              24499183 * 2)])
    worker = protobufs.WorkerStub(channel)

    try:
        worker.Shutdown(protobufs.Empty())
    except grpc.RpcError as e:
        status = e.code()
        if status == grpc.StatusCode.UNAVAILABLE:
            print('could not shutdown worker!')
            exit(1)
        else:
            raise ScannerException(
                'Worker errored with status: {}'.format(status))

    killer_process.join()