def query(path):
    rv = []
    fps = 30
    sample_every = 3

    # Decode the video and sample one frame every `sample_every` frames,
    # stopping at frame 300.
    all_frames = VideoToFrames(LocalVideoDecoder(path))()
    sampled_frames = Slice(step=sample_every, end=300)(all_frames)

    # Run object detection on the sampled frames using the listed detection
    # servers, keeping 'person' detections with score >= 0.3.
    detections = Detection(server_list=[
        'cloudlet031.elijah.cs.cmu.edu:5000',
        'cloudlet031.elijah.cs.cmu.edu:5001'
    ])(sampled_frames)
    person_detections = DetectionFilterFlatten(['person'], 0.3)(detections)

    # Coalesce per-frame detections into tracks: merge bounds by their span
    # when consecutive detections overlap spatially (IoU >= 0.1) and are at
    # most `sample_every * 3` frames apart.
    coalesced_persons = CoalesceByLast(
        bounds_merge_op=Bounds3D.span,
        predicate=iou_at_least(0.1),
        epsilon=sample_every * 3)(person_detections)

    # Keep only tracks longer than 3 seconds.
    long_coalesced_persons = Filter(
        pred_fn=lambda framegrps: framegrps.bounds.length() > fps * 3  # 3 seconds
    )(coalesced_persons)

    # Crop the matching spatio-temporal volumes out of the video and return
    # them as MP4 clips.
    framegrps = VideoCropFrameGroup(
        LRULocalVideoDecoder(path))(long_coalesced_persons)
    for fg in run_to_finish(framegrps):
        rv.append((fg.bounds, fg.get_mp4(), 'mp4'))
    return rv
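The CoalesceByLast step above is driven by an IoU predicate. The sketch below shows the assumed semantics of an iou_at_least-style predicate: given a threshold, it returns a function of two bounds, indexable by 'x1'/'x2'/'y1'/'y2' in normalized coordinates, that is true when their spatial intersection-over-union meets the threshold. This is an illustration of the assumed behavior, not the stsearch implementation.

# Illustrative sketch only; not the stsearch implementation of iou_at_least().
def iou_at_least_sketch(threshold):
    def pred(b1, b2):
        # Spatial intersection of the two boxes in normalized coordinates.
        ix = max(0.0, min(b1['x2'], b2['x2']) - max(b1['x1'], b2['x1']))
        iy = max(0.0, min(b1['y2'], b2['y2']) - max(b1['y1'], b2['y1']))
        inter = ix * iy
        area1 = (b1['x2'] - b1['x1']) * (b1['y2'] - b1['y1'])
        area2 = (b2['x2'] - b2['x1']) * (b2['y2'] - b2['y1'])
        union = area1 + area2 - inter
        return union > 0 and inter / union >= threshold
    return pred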
def query(path, session, *args, **kwargs):
    assert isinstance(session, Session)
    session.log('error', "Enter query fn")
    rv = []

    # Decode the video, sample one frame every 30 frames up to frame 1800,
    # run object detection, and keep 'bus' detections with score >= 0.3.
    all_frames = VideoToFrames(LocalVideoDecoder(path))()
    sampled_frames = Slice(step=30, end=1800)(all_frames)
    detections = Detection('cloudlet031.elijah.cs.cmu.edu', 5000)(sampled_frames)
    person_detections = DetectionFilterFlatten(['bus'], 0.3)(detections)

    # SIFT filters: extract SIFT features from each candidate patch, then
    # match them against the example blob shipped with the query.
    fil_sift_extract_spec = FilterSpec(
        "sift-extract",
        code=Blob(open("fil_homography.py", 'rb').read()),
        arguments=['ExtractFilter', 'SIFT'],
        min_score=1.)
    fil_sift_match_spec = FilterSpec(
        "sift-match",
        code=Blob(open("fil_homography.py", 'rb').read()),
        arguments=['MatchFilter', 'SIFT', 0.7],
        dependencies=[fil_sift_extract_spec, ],
        blob_argument=Blob(open("brandenburg_hi_blob.zip", 'rb').read()),
        min_score=5)

    matched_patches = RGBDiamondSearch(
        session, [fil_sift_extract_spec, fil_sift_match_spec])(person_detections)

    for intrvl in run_to_finish(matched_patches):
        rv.append((intrvl.bounds, intrvl.jpeg, 'jpg'))
    return rv
def query(path, session, *args, **kwargs):
    assert isinstance(session, Session)
    session.log('error', "Enter query fn")
    rv = []
    all_frames = VideoToFrames(LocalVideoDecoder(path))()
    sampled_frames = Slice(step=30, end=1800)(all_frames)
    detections = Detection('cloudlet031.elijah.cs.cmu.edu', 5000)(sampled_frames)
    person_detections = DetectionFilterFlatten(['bus'], 0.3)(detections)

    # orientation filter: 'vert' (vertical) or 'horz' (horizontal)
    orient_spec = FilterSpec(
        "orient",
        Blob(FIL_ORIENTATION_CODE),
        arguments=['vert', ],
        min_score=1.)
    oriented_patches = RGBDiamondSearch(session, [orient_spec, ])(person_detections)

    for intrvl in run_to_finish(oriented_patches):
        rv.append((intrvl.bounds, intrvl.jpeg, 'jpg'))
    return rv
def query(path):
    rv = []
    all_frames = VideoToFrames(LocalVideoDecoder(path))()
    sampled_frames = Slice(step=30, end=300)(all_frames)
    detections = Detection('cloudlet031.elijah.cs.cmu.edu', 5000)(sampled_frames)
    person_detections = DetectionFilterFlatten(['person'], 0.3)(detections)
    for intrvl in run_to_finish(person_detections):
        rv.append((intrvl.bounds, intrvl.jpeg))
    return rv
import os
from pathlib import Path

from stsearch.interval import *
from stsearch.op import *
from stsearch.utils import run_to_finish
from stsearch.videolib import *

# `logger` is assumed to be provided by the wildcard imports above.

INPUT_NAME = "example.mp4"
OUTPUT_DIR = Path(__file__).stem + "_output"

if __name__ == "__main__":
    os.makedirs(OUTPUT_DIR, exist_ok=True)

    # Two spatio-temporal crops: frames 30-330 upper-left quadrant, and
    # frames 300-420 center region.
    intervals_to_crop = [
        Interval(Bounds3D(t1=30, t2=330, x1=0, x2=.5, y1=0, y2=.5),
                 {'msg': "ts 30-330 upper left crop"}),
        Interval(Bounds3D(300, 420, .25, .75, .4, .6),
                 {'msg': "ts 300-420 center crop"})
    ]

    crop_intervals = FromIterable(intervals_to_crop)()
    framegroups = LocalVideoCropFrameGroup(INPUT_NAME)(crop_intervals)

    for k, fg in enumerate(run_to_finish(framegroups)):
        assert isinstance(fg, FrameGroupInterval)
        out_name = f"{OUTPUT_DIR}/{k}-{fg['t1']}-{fg['t2']}.mp4"
        fg.savevideo(out_name)
        logger.debug(f"saved {out_name}")

    logger.info(
        "You should find 2 .mp4 files, cropped -- spatially and temporally -- from the input."
    )
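Unlike the standalone script above, the query(path) functions earlier in this section do not write files themselves; they return a list of (bounds, payload, extension) tuples. The following is a minimal driver sketch for the first listing; the module name person_clips_query and the output naming scheme are assumptions, not part of the original code.

# Minimal driver sketch. Assumptions: the first listing's query() lives in a
# module named person_clips_query, and output file names are arbitrary.
import os

from person_clips_query import query  # hypothetical module name

OUTPUT_DIR = "query_output"

if __name__ == "__main__":
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    # query() returns a list of (bounds, payload_bytes, extension) tuples.
    for k, (bounds, payload, ext) in enumerate(query("example.mp4")):
        out_name = os.path.join(OUTPUT_DIR, f"{k}.{ext}")
        with open(out_name, "wb") as f:
            f.write(payload)
        print(f"saved {out_name} for bounds {bounds}")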