def test_py_variadic(cl):
    input = NamedVideoStream(cl, 'test1')
    frame = cl.io.Input([input])
    range_frame = cl.streams.Range(frame, ranges=[{'start': 0, 'end': 30}])
    out_frame = cl.ops.TestPyVariadic(range_frame, range_frame, range_frame)
    output = NamedVideoStream(cl, 'test_variadic')
    output_op = cl.io.Output(out_frame.lossless(), [output])
    cl.run(output_op, PerfParams.estimate(), cache_mode=CacheMode.Overwrite, show_progress=False)
    next(output.load())

def test_stream_args(cl):
    frame = cl.io.Input([NamedVideoStream(cl, 'test1')])
    resized_frame = cl.ops.Resize(frame=frame, width=[640], height=[480])
    range_frame = cl.streams.Range(resized_frame, [(0, 10)])
    output_stream = NamedVideoStream(cl, 'test_stream_args')
    output_op = cl.io.Output(range_frame, [output_stream])
    cl.run(output_op, PerfParams.estimate(), cache_mode=CacheMode.Overwrite, show_progress=False)
    list(output_stream.load())

def test_lossless(cl):
    input = NamedVideoStream(cl, 'test1')
    frame = cl.io.Input([input])
    range_frame = cl.streams.Range(frame, ranges=[{'start': 0, 'end': 30}])
    blurred_frame = cl.ops.Blur(frame=range_frame, kernel_size=3, sigma=0.1)
    output = NamedVideoStream(cl, 'test_blur')
    output_op = cl.io.Output(blurred_frame.lossless(), [output])
    cl.run(output_op, PerfParams.estimate(), cache_mode=CacheMode.Overwrite, show_progress=False)
    next(output.load())

def run_sampler_job(sampler, sampler_args, expected_rows):
    frame = cl.io.Input([NamedVideoStream(cl, 'test1')])
    sample_frame = sampler(input=frame, **sampler_args)
    output = NamedVideoStream(cl, 'test_sample')
    output_op = cl.io.Output(sample_frame, [output])
    cl.run(output_op, PerfParams.estimate(), cache_mode=CacheMode.Overwrite, show_progress=False)
    num_rows = len(list(output.load()))
    assert num_rows == expected_rows

def test_slice(cl):
    input = NamedVideoStream(cl, 'test1')
    frame = cl.io.Input([input])
    slice_frame = cl.streams.Slice(frame, partitions=[cl.partitioner.all(50)])
    unsliced_frame = cl.streams.Unslice(slice_frame)
    output = NamedStream(cl, 'test_slicing')
    output_op = cl.io.Output(unsliced_frame, [output])
    cl.run(output_op, PerfParams.estimate(), cache_mode=CacheMode.Overwrite, show_progress=False)
    assert input.len() == output.len()

def run_job(args_1, args_2):
    frame = cl.io.Input([NamedVideoStream(cl, 'test1')])
    sample_frame_1 = cl.streams.Range(input=frame, ranges=[args_1])
    sample_frame_2 = cl.streams.Range(input=frame, ranges=[args_2])
    output_op_1 = cl.io.Output(sample_frame_1, [NamedVideoStream(cl, 'test_mp_1')])
    output_op_2 = cl.io.Output(sample_frame_2, [NamedVideoStream(cl, 'test_mp_2')])
    cl.run([output_op_1, output_op_2],
           PerfParams.estimate(),
           cache_mode=CacheMode.Overwrite,
           show_progress=False)

def test_unbounded_state(cl):
    input = NamedVideoStream(cl, 'test1')
    frame = cl.io.Input([input])
    slice_frame = cl.streams.Slice(frame, partitions=[cl.partitioner.all(50)])
    increment = cl.ops.TestIncrementUnbounded(ignore=slice_frame)
    unsliced_increment = cl.streams.Unslice(increment)
    output = NamedStream(cl, 'test_unbounded_state')
    output_op = cl.io.Output(unsliced_increment, [output])
    cl.run(output_op, PerfParams.estimate(), cache_mode=CacheMode.Overwrite, show_progress=False)
    assert output.len() == input.len()

def test_save_mp4(cl):
    input = NamedVideoStream(cl, 'test1')
    frame = cl.io.Input([input])
    range_frame = cl.streams.Range(frame, ranges=[{'start': 0, 'end': 30}])
    blurred_frame = cl.ops.Blur(frame=range_frame, kernel_size=3, sigma=0.1)
    output = NamedVideoStream(cl, 'test_save_mp4')
    output_op = cl.io.Output(blurred_frame, [output])
    cl.run(output_op, PerfParams.estimate(), cache_mode=CacheMode.Overwrite, show_progress=False)

    f = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
    f.close()
    output.save_mp4(f.name)
    run(['rm', '-rf', f.name])

def test_bind_op_args(cl):
    input = NamedVideoStream(cl, 'test1')
    frame = cl.io.Input([input, input])
    range_frame = cl.streams.Range(frame, ranges=[{'start': 0, 'end': 1} for _ in range(2)])
    test_out = cl.ops.TestPy(frame=range_frame, kernel_arg=1, x=[1, 10], y=[5, 50])
    outputs = [NamedStream(cl, 'test_hist_0'), NamedStream(cl, 'test_hist_1')]
    output_op = cl.io.Output(test_out, outputs)
    pairs = [(1, 5), (10, 50)]
    cl.run(output_op, PerfParams.estimate(), cache_mode=CacheMode.Overwrite, show_progress=False)

    for i, (x, y) in enumerate(pairs):
        values = list(outputs[i].load())
        p = values[0]
        assert p['x'] == x
        assert p['y'] == y

def test_overlapping_slice(cl):
    input = NamedVideoStream(cl, 'test1')
    frame = cl.io.Input([input])
    slice_frame = cl.streams.Slice(frame, partitions=[
        cl.partitioner.strided_ranges([(0, 15), (5, 25), (15, 35)], 1)
    ])
    sample_frame = cl.streams.Range(slice_frame, ranges=[
        SliceList([
            {'start': 0, 'end': 10},
            {'start': 5, 'end': 15},
            {'start': 5, 'end': 15},
        ])
    ])
    unsliced_frame = cl.streams.Unslice(sample_frame)
    output = NamedStream(cl, 'test_slicing')
    output_op = cl.io.Output(unsliced_frame, [output])
    cl.run(output_op, PerfParams.estimate(), cache_mode=CacheMode.Overwrite, show_progress=False)
    assert output.len() == 30

def run_spacer_job(spacer, spacing):
    frame = cl.io.Input([NamedVideoStream(cl, 'test1')])
    hist = cl.ops.Histogram(frame=frame)
    space_hist = spacer(input=hist, spacings=[spacing])
    output = NamedStream(cl, 'test_space')
    output_op = cl.io.Output(space_hist, [output])
    cl.run(output_op, PerfParams.estimate(), cache_mode=CacheMode.Overwrite, show_progress=False)
    return output

def test_python_stencil_batch_kernel(cl):
    input = NamedVideoStream(cl, 'test1')
    frame = cl.io.Input([input])
    range_frame = cl.streams.Range(frame, ranges=[{'start': 0, 'end': 30}])
    test_out = cl.ops.TestPyStencilBatch(frame=range_frame, batch=50)
    output = NamedStream(cl, 'test_hist')
    output_op = cl.io.Output(test_out, [output])
    cl.run(output_op, PerfParams.estimate(), cache_mode=CacheMode.Overwrite, show_progress=False)
    next(output.load())

def run(self, cl, device):
    input = NamedVideoStream(cl, 'test1_inplace')
    frame = cl.io.Input([input])
    hist = cl.ops.Histogram(frame=frame, device=device)
    output = NamedStream(cl, 'test_hist')
    output_op = cl.io.Output(hist, [output])
    cl.run(output_op, PerfParams.estimate(), cache_mode=CacheMode.Overwrite, show_progress=False)
    next(output.load())

def test_space(cl):
    def run_spacer_job(spacer, spacing):
        frame = cl.io.Input([NamedVideoStream(cl, 'test1')])
        hist = cl.ops.Histogram(frame=frame)
        space_hist = spacer(input=hist, spacings=[spacing])
        output = NamedStream(cl, 'test_space')
        output_op = cl.io.Output(space_hist, [output])
        cl.run(output_op, PerfParams.estimate(), cache_mode=CacheMode.Overwrite, show_progress=False)
        return output

    # Repeat
    spacing_distance = 8
    table = run_spacer_job(cl.streams.Repeat, spacing_distance)
    num_rows = 0
    for hist in table.load():
        # Verify outputs are repeated correctly
        if num_rows % spacing_distance == 0:
            ref_hist = hist
        assert len(hist) == 3
        for c in range(len(hist)):
            assert (ref_hist[c] == hist[c]).all()
        num_rows += 1
    assert num_rows == NamedVideoStream(cl, 'test1').len() * spacing_distance

    # Null
    table = run_spacer_job(cl.streams.RepeatNull, spacing_distance)
    num_rows = 0
    for hist in table.load():
        # Verify outputs are None for null rows
        if num_rows % spacing_distance == 0:
            assert not isinstance(hist, NullElement)
            assert len(hist) == 3
            assert hist[0].shape[0] == 16
        else:
            assert isinstance(hist, NullElement)
        num_rows += 1
    assert num_rows == NamedVideoStream(cl, 'test1').len() * spacing_distance

def test_job_timeout(timeout_cl):
    @scannerpy.register_python_op()
    def timeout_fn(self, frame: FrameType) -> bytes:
        time.sleep(5)
        return bytes('what', 'utf-8')

    cl = timeout_cl

    input = NamedVideoStream(cl, 'test1')
    frame = cl.io.Input([input])
    range_frame = cl.streams.Range(frame, ranges=[{'start': 0, 'end': 1}])
    sleep_frame = cl.ops.timeout_fn(frame=range_frame)
    output = NamedVideoStream(cl, 'test_timeout')
    output_op = cl.io.Output(sleep_frame, [output])

    cl.run(output_op,
           PerfParams.estimate(pipeline_instances_per_node=1),
           task_timeout=0.1,
           cache_mode=CacheMode.Overwrite,
           show_progress=False)

    assert not output.committed()

def test_job_blacklist(blacklist_cl):
    # NOTE(wcrichto): this class must NOT be at the top level. If it is, then pytest injects
    # some of its dependencies, and sending this class to an external Scanner process will fail
    # with a missing "py_test" import.
    @scannerpy.register_python_op()
    class TestPyFail(Kernel):
        def execute(self, frame: FrameType) -> bytes:
            raise ScannerException('Test')

    cl = blacklist_cl
    input = NamedVideoStream(cl, 'test1')
    frame = cl.io.Input([input])
    range_frame = cl.streams.Range(frame, ranges=[{'start': 0, 'end': 1}])
    failed_output = cl.ops.TestPyFail(frame=range_frame)
    output = NamedVideoStream(cl, 'test_py_fail')
    output_op = cl.io.Output(failed_output, [output])
    cl.run(output_op,
           PerfParams.estimate(pipeline_instances_per_node=1),
           cache_mode=CacheMode.Overwrite,
           show_progress=False)
    assert not output.committed()

def test_auto_ingest(cl):
    (vid1_path, vid2_path) = download_videos()
    input = NamedVideoStream(cl, 'test3', path=vid1_path)
    frame = cl.io.Input([input])
    hist = cl.ops.Histogram(frame=frame)
    output = NamedStream(cl, 'test_hist')
    output_op = cl.io.Output(hist, [output])
    cl.run(output_op, PerfParams.estimate(), cache_mode=CacheMode.Overwrite, show_progress=False)
    run(['rm', '-rf', vid1_path, vid2_path])

def test_wider_than_packet_stencil(cl):
    input = NamedVideoStream(cl, 'test1')
    frame = cl.io.Input([input])
    sample_frame = cl.streams.Range(frame, ranges=[{'start': 0, 'end': 3}])
    flow = cl.ops.OpticalFlow(frame=sample_frame, stencil=[0, 1])
    output = NamedStream(cl, 'test_stencil')
    output_op = cl.io.Output(flow, [output])
    cl.run(output_op,
           PerfParams.manual(1, 1, pipeline_instances_per_node=1),
           cache_mode=CacheMode.Overwrite,
           show_progress=False)
    assert output.len() == 3

def main(): print("Prepare videos and frames") # video = Video.objects.filter(path__contains='men_single_final_gold')[0] # movie_path = video.path # movie_name = video.item_name() sc = Client() stride = 1 input_stream = NamedVideoStream(sc, movie_name, path=movie_path) frame = sc.io.Input([input_stream]) strided_frame = sc.streams.Stride(frame, [stride]) print('Running Scanner MaskRCNN op') maskrcnn_frame = sc.ops.MaskRCNNDetectObjects( frame=strided_frame, device=DeviceType.GPU, #if sc.has_gpu() else DeviceType.CPU, batch=8, confidence_threshold=0.5, min_image_size=800) maskrcnn_stream = NamedStream(sc, movie_name + '_maskrcnn') output_op = sc.io.Output(maskrcnn_frame, [maskrcnn_stream]) sc.run(output_op, scannerpy.common.PerfParams.estimate(), cache_mode=scannerpy.CacheMode.Ignore) print('Writing MaskRCNN metadata into frames') drawn_frame = sc.ops.DrawMaskRCNN(frame=strided_frame, bundled_data=sc.io.Input( [maskrcnn_stream]), min_score_thresh=0.5) drawn_stream = NamedVideoStream(sc, movie_name + '_maskrcnn_draw') output_op = sc.io.Output(drawn_frame, [drawn_stream]) sc.run(output_op, scannerpy.common.PerfParams.estimate(), cache_mode=scannerpy.CacheMode.Overwrite) drawn_stream.save_mp4('/app/result/' + movie_name + '_maskrcnn')
def test_fetch_resources(cl):
    with tempfile.NamedTemporaryFile() as f:
        f.write(b'0')
        f.flush()

        input = NamedVideoStream(cl, 'test1')
        frame = cl.io.Input([input])
        range_frame = cl.streams.Range(frame, ranges=[{'start': 0, 'end': 3}])
        test_out = cl.ops.ResourceTest(frame=frame, path=f.name)
        output = NamedStream(cl, 'test_hist')
        output_op = cl.io.Output(test_out, [output])
        cl.run(output_op,
               PerfParams.estimate(pipeline_instances_per_node=2),
               cache_mode=CacheMode.Overwrite,
               show_progress=False)

def test_perf_params(cl):
    frame = cl.io.Input([NamedVideoStream(cl, 'test1')])
    hist = cl.ops.Histogram(frame=frame)
    ghist = cl.streams.Gather(hist, [[0]])
    output_op = cl.io.Output(ghist, [NamedStream(cl, '_ignore')])

    cl.run(output_op,
           PerfParams.manual(10, 10),
           show_progress=False,
           cache_mode=CacheMode.Overwrite)

    cl.run(output_op,
           PerfParams.estimate(),
           show_progress=False,
           cache_mode=CacheMode.Overwrite)

def test_slice_args(cl):
    frame = cl.io.Input([NamedVideoStream(cl, 'test1')])
    slice_frame = cl.streams.Slice(
        frame, [cl.partitioner.ranges([[0, 1], [1, 2], [2, 3]])])
    test = cl.ops.TestSliceArgs(frame=slice_frame, arg=[SliceList([i for i in range(3)])])
    unsliced_frame = cl.streams.Unslice(test)
    output = NamedStream(cl, 'test_slicing')
    output_op = cl.io.Output(unsliced_frame, [output])
    cl.run(output_op, PerfParams.estimate(), cache_mode=CacheMode.Overwrite, show_progress=False)

    num_rows = 0
    list(output.load())

def test_no_workers(no_workers_cl):
    cl = no_workers_cl

    input = NamedVideoStream(cl, 'test1')
    frame = cl.io.Input([input])
    hist = cl.ops.Histogram(frame=frame)
    output_op = cl.io.Output(hist, [NamedStream(cl, '_ignore')])

    exc = False
    try:
        cl.run(output_op,
               PerfParams.estimate(),
               show_progress=False,
               cache_mode=CacheMode.Overwrite)
    except ScannerException:
        exc = True

    assert exc

def test_profiler(cl):
    frame = cl.io.Input([NamedVideoStream(cl, 'test1')])
    hist = cl.ops.Histogram(frame=frame)
    ghist = cl.streams.Gather(hist, [[0]])
    output_op = cl.io.Output(ghist, [NamedStream(cl, '_ignore')])

    time_start = time.time()
    job_id = cl.run(output_op,
                    PerfParams.estimate(),
                    show_progress=False,
                    cache_mode=CacheMode.Overwrite)
    print('Time', time.time() - time_start)

    profile = cl.get_profile(job_id)
    f = tempfile.NamedTemporaryFile(delete=False, suffix='.trace')
    f.close()
    profile.write_trace(f.name)
    profile.statistics()
    run(['rm', '-f', f.name])

def test_pose(sc):
    vid = [NamedVideoStream(sc, 'test1')]
    frame = sc.io.Input(vid)
    frame_sample = sc.streams.Gather(frame, [list(range(0, 1000, 100))])
    pose = sc.ops.OpenPose(
        frame=frame_sample,
        device=DeviceType.GPU,
        pose_num_scales=6,
        pose_scale_gap=0.16,
        compute_hands=True,
        hand_num_scales=6,
        hand_scale_gap=0.16,
        compute_face=True,
        batch=5)
    output = NamedStream(sc, 'test1-pose')
    output_op = sc.io.Output(pose, [output])
    sc.run(output_op, PerfParams.estimate())

def test_bounded_state(cl):
    warmup = 3

    frame = cl.io.Input([NamedVideoStream(cl, 'test1')])
    increment = cl.ops.TestIncrementBounded(ignore=frame, bounded_state=warmup)
    sampled_increment = cl.streams.Gather(increment, indices=[[0, 10, 25, 26, 27]])
    output = NamedStream(cl, 'test_bounded_state')
    output_op = cl.io.Output(sampled_increment, [output])
    cl.run(output_op, PerfParams.estimate(), cache_mode=CacheMode.Overwrite, show_progress=False)

    num_rows = 0
    expected_output = [0, warmup, warmup, warmup + 1, warmup + 2]
    for buf in output.load():
        (val, ) = struct.unpack('=q', buf)
        assert val == expected_output[num_rows]
        num_rows += 1
    assert num_rows == 5

def load(self, ty=None, fn=None, rows=None, workers=16):
    """
    Loads the results of a Scanner computation into Python.

    Kwargs:
        fn: Optional function to apply to the binary blobs as they are read in.

    Returns:
        Generator that yields either a numpy array for frame columns or a
        binary blob for non-frame columns (optionally processed by the `fn`).
    """

    self._load_meta()
    # If the column is a video, then dump the requested frames to disk as
    # PNGs and return the decoded PNGs
    if (self._descriptor.type == protobufs.Video
            and self._video_descriptor.codec_type == protobufs.VideoDescriptor.H264):
        png_table_name = self._sc._png_dump_prefix.format(self._table.name(), self._name)
        frame = self._sc.io.Input([NamedVideoStream(self._sc, self._table.name())])
        enc_input = frame
        if rows is not None:
            sampled_frame = self._sc.streams.Gather(frame, indices=[rows])
            enc_input = sampled_frame
        img = self._sc.ops.ImageEncoder(frame=enc_input)
        output = [NamedStream(self._sc, png_table_name)]
        output_op = self._sc.io.Output(img, output)
        self._sc.run(output_op,
                     PerfParams.estimate(),
                     cache_mode=CacheMode.Overwrite,
                     show_progress=False)
        return output[0].load()
    elif self._descriptor.type == protobufs.Video:
        frame_type = self._video_descriptor.frame_type
        if frame_type == protobufs.U8:
            dtype = np.uint8
        elif frame_type == protobufs.F32:
            dtype = np.float32
        elif frame_type == protobufs.F64:
            dtype = np.float64

        def raw_frame_gen(shape0, shape1, shape2, typ):
            def parser(bufs):
                output = np.frombuffer(bufs, dtype=typ)
                return output.reshape((shape0, shape1, shape2))
            return parser

        parser_fn = raw_frame_gen(self._video_descriptor.height,
                                  self._video_descriptor.width,
                                  self._video_descriptor.channels, dtype)
        return self._load(fn=parser_fn, rows=rows, workers=workers)
    else:
        # Use a deserialize function if provided.
        # If not, use a type if provided.
        # If not, attempt to determine the type from the column's table descriptor.
        # If that doesn't work, then assume no deserialization function, and return bytes.
        if fn is None:
            if ty is None:
                type_name = self._descriptor.type_name
                if type_name != "":
                    ty = scannertypes.get_type_info_cpp(type_name)
            if ty is not None:
                fn = ty.deserialize
        return self._load(fn, rows=rows, workers=workers)

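# A minimal usage sketch for load() above, assuming a running Scanner Client `cl`
# and an already-ingested table named 'test1' with a 'frame' video column (the same
# fixtures the tests above rely on). The helper name below is illustrative, not
# part of the library API.
def example_load_usage(cl):
    # Video columns decode to numpy arrays; `rows` restricts which frames are read.
    frames = list(cl.table('test1').column('frame').load(rows=[0, 10, 20]))
    # Non-frame columns yield binary blobs, optionally deserialized by `fn`, e.g.
    #   cl.table('some_table').column('some_column').load(fn=my_parser)  # names hypothetical
    return frames
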
def main(): print("Prepare videos and frames") video = Video.objects.filter(path__contains='wim')[0] video_path = video.path video_name = video.item_name() sc = Client() stride = 1 input_stream = NamedVideoStream(sc, video_name, path=video_path) frame = sc.io.Input([input_stream]) # run on all frames # running_frame = sc.streams.Stride(frame, [stride]) hit_annotation = pickle.load( open('/app/data/pkl/hit_annotation_tennis.pkl', 'rb'))[video_name + '.mp4'] # run on selected frames # hit_dict = [] # for h in hit_annotation.values(): # hit_dict += h hit_dict = hit_annotation # frame_ids = [i for point in hit_dict for i in range(point[0]['fid']-25, point[-1]['fid']+25) ] frame_ids = [ i for point in hit_dict for i in range(point[0] - 25, point[-1] + 25) ] frame_ids.sort() running_frame = sc.streams.Gather(frame, [frame_ids]) print('Running Scanner DensePose op on %d frames' % (len(frame_ids))) densepose_frame = sc.ops.DensePoseDetectPerson(frame=running_frame, device=DeviceType.GPU, batch=1, confidence_threshold=0.5, nms_threshold=0.2) densepose_stream = NamedStream(sc, video_name + '_densepose') output_op = sc.io.Output(densepose_frame, [densepose_stream]) sc.run(output_op, scannerpy.common.PerfParams.estimate(), cache_mode=scannerpy.CacheMode.Overwrite) exit() print('Writing DensePose metadata into frames') drawn_frame = sc.ops.DrawDensePose(frame=running_frame, bundled_data=sc.io.Input( [densepose_stream]), min_score_thresh=0.5, show_body=True) drawn_stream = NamedVideoStream(sc, video_name + '_densepose_draw_uvbody') output_op = sc.io.Output(drawn_frame, [drawn_stream]) sc.run(output_op, scannerpy.common.PerfParams.estimate(), cache_mode=scannerpy.CacheMode.Overwrite) drawn_stream.save_mp4('/app/result/' + video_name + '_densepose_uvbody') drawn_frame = sc.ops.DrawDensePose(frame=running_frame, bundled_data=sc.io.Input( [densepose_stream]), min_score_thresh=0.5, show_body=False) drawn_stream = NamedVideoStream(sc, video_name + '_densepose_draw_full') output_op = sc.io.Output(drawn_frame, [drawn_stream]) sc.run(output_op, scannerpy.common.PerfParams.estimate(), cache_mode=scannerpy.CacheMode.Overwrite) drawn_stream.save_mp4('/app/result/' + video_name + '_densepose_full')
def test_gather_video_column(cl):
    for name in ['test1', 'test1_inplace']:
        # Gather rows
        rows = [0, 10, 100, 200]
        frames = list(NamedVideoStream(cl, name).load(rows=rows))
        assert len(frames) == len(rows)

def test_multiple_outputs(cl):
    sampler = cl.streams.Range

    def run_job(args_1, args_2):
        frame = cl.io.Input([NamedVideoStream(cl, 'test1')])
        sample_frame_1 = cl.streams.Range(input=frame, ranges=[args_1])
        sample_frame_2 = cl.streams.Range(input=frame, ranges=[args_2])
        output_op_1 = cl.io.Output(sample_frame_1, [NamedVideoStream(cl, 'test_mp_1')])
        output_op_2 = cl.io.Output(sample_frame_2, [NamedVideoStream(cl, 'test_mp_2')])
        cl.run([output_op_1, output_op_2],
               PerfParams.estimate(),
               cache_mode=CacheMode.Overwrite,
               show_progress=False)

    # This should fail
    sampler_args_1 = {'start': 0, 'end': 30}
    sampler_args_2 = {'start': 0, 'end': 15}
    exc = False
    try:
        run_job(sampler_args_1, sampler_args_2)
    except ScannerException:
        exc = True
    assert exc

    # This should succeed
    sampler_args_1 = {'start': 0, 'end': 30}
    expected_rows_1 = 30
    sampler_args_2 = {'start': 30, 'end': 60}
    expected_rows_2 = 30
    run_job(sampler_args_1, sampler_args_2)

    num_rows = 0
    for _ in cl.table('test_mp_1').column('frame').load():
        num_rows += 1
    assert num_rows == expected_rows_1

    num_rows = 0
    for _ in cl.table('test_mp_2').column('frame').load():
        num_rows += 1
    assert num_rows == expected_rows_2

    # This should succeed
    frame = cl.io.Input([NamedVideoStream(cl, 'test1')])
    sample_frame_1 = cl.streams.Range(input=frame, ranges=[sampler_args_1])
    output_op_1 = cl.io.Output(sample_frame_1, [NamedVideoStream(cl, 'test_mp_1')])
    output_op_2 = cl.io.Output(sample_frame_1, [NamedVideoStream(cl, 'test_mp_2')])
    cl.run([output_op_1, output_op_2],
           PerfParams.estimate(),
           cache_mode=CacheMode.Overwrite,
           show_progress=False)

    num_rows = 0
    for _ in cl.table('test_mp_1').column('frame').load():
        num_rows += 1
    assert num_rows == expected_rows_1