Example #1
def query(path):
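    """Find people that stay in view for more than 3 seconds.

    Samples every 3rd frame of the first 300 frames, runs remote person
    detection, coalesces detections whose boxes overlap (IoU >= 0.1) across
    nearby samples into longer spans, keeps spans longer than fps * 3 frames
    (3 seconds at the assumed 30 fps), and returns (bounds, mp4 clip, 'mp4')
    tuples cropped from the original video.
    """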
    rv = []

    fps = 30
    sample_every = 3

    all_frames = VideoToFrames(LocalVideoDecoder(path))()
    sampled_frames = Slice(step=sample_every, end=300)(all_frames)
    detections = Detection(server_list=[
        'cloudlet031.elijah.cs.cmu.edu:5000',
        'cloudlet031.elijah.cs.cmu.edu:5001'
    ])(sampled_frames)
    person_detections = DetectionFilterFlatten(['person'], 0.3)(detections)

    coalesced_persons = CoalesceByLast(
        bounds_merge_op=Bounds3D.span,
        predicate=iou_at_least(0.1),
        epsilon=sample_every * 3)(person_detections)

    long_coalesced_persons = Filter(
        pred_fn=lambda framegrps: framegrps.bounds.length() > fps * 3  # 3 seconds
    )(coalesced_persons)
    framegrps = VideoCropFrameGroup(
        LRULocalVideoDecoder(path))(long_coalesced_persons)

    for _, fg in enumerate(run_to_finish(framegrps)):
        rv.append((fg.bounds, fg.get_mp4(), 'mp4'))

    return rv
Example #2
def query(path, session, *args, **kwargs):
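    """Find bus detections that match a reference image using SIFT features.

    Samples every 30th frame of the first 1800, runs remote object detection,
    keeps 'bus' detections, then pushes each cropped patch through a
    Diamond-style filter chain (SIFT extraction followed by SIFT matching
    against the blob in brandenburg_hi_blob.zip). Returns (bounds, jpeg
    bytes, 'jpg') tuples for the matched patches.
    """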
    assert isinstance(session, Session)
    session.log('error', "Enter query fn")
    rv = []

    all_frames = VideoToFrames(LocalVideoDecoder(path))()
    sampled_frames = Slice(step=30, end=1800)(all_frames)
    detections = Detection('cloudlet031.elijah.cs.cmu.edu', 5000)(sampled_frames)
    bus_detections = DetectionFilterFlatten(['bus'], 0.3)(detections)

    # SIFT filter
    fil_sift_extract_spec = FilterSpec(
        "sift-extract",
        code=Blob(open("fil_homography.py", 'rb').read()),
        arguments=['ExtractFilter', 'SIFT'],
        min_score=1.
    )

    fil_sift_match_spec = FilterSpec(
        "sift-match",
        code=Blob(open("fil_homography.py", 'rb').read()),
        arguments=['MatchFilter', 'SIFT', 0.7],
        dependencies=[fil_sift_extract_spec, ],
        blob_argument=Blob(open("brandenburg_hi_blob.zip", 'rb').read()),
        min_score=5
    )

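    # Run both FilterSpecs as a Diamond-style filter chain over each cropped
    # bus patch; presumably only patches reaching min_score on the SIFT
    # match filter are passed downstream.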
    matched_patches = RGBDiamondSearch(
        session, [fil_sift_extract_spec, fil_sift_match_spec])(bus_detections)

    for _, intrvl in enumerate(run_to_finish(matched_patches)):
        rv.append((intrvl.bounds, intrvl.jpeg, 'jpg'))

    return rv
Example #3
def query(path, session, *args, **kwargs):
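    """Find bus detections with a vertical orientation.

    Samples every 30th frame of the first 1800, runs remote object detection,
    keeps 'bus' detections, then applies an orientation filter (the 'vert'
    variant of FIL_ORIENTATION_CODE, defined elsewhere) to each cropped
    patch. Returns (bounds, jpeg bytes, 'jpg') tuples for passing patches.
    """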
    assert isinstance(session, Session)
    session.log('error', "Enter query fn")
    rv = []

    all_frames = VideoToFrames(LocalVideoDecoder(path))()
    sampled_frames = Slice(step=30, end=1800)(all_frames)
    detections = Detection('cloudlet031.elijah.cs.cmu.edu',
                           5000)(sampled_frames)
    bus_detections = DetectionFilterFlatten(['bus'], 0.3)(detections)

    # orientation filter: 'vert' (vertical) or 'horz' (horizontal)
    orient_spec = FilterSpec("orient",
                             Blob(FIL_ORIENTATION_CODE),
                             arguments=['vert'],
                             min_score=1.)

    oriented_patches = RGBDiamondSearch(session, [orient_spec])(bus_detections)

    for _, intrvl in enumerate(run_to_finish(oriented_patches)):
        rv.append((intrvl.bounds, intrvl.jpeg, 'jpg'))

    return rv
Example #4
def query(path):
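    """Minimal pipeline: detect people on sampled frames.

    Samples every 30th frame of the first 300, runs remote person detection,
    and returns a (bounds, jpeg crop) tuple for each detection.
    """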
    rv = []

    all_frames = VideoToFrames(LocalVideoDecoder(path))()
    sampled_frames = Slice(step=30, end=300)(all_frames)
    detections = Detection('cloudlet031.elijah.cs.cmu.edu',
                           5000)(sampled_frames)
    person_detections = DetectionFilterFlatten(['person'], 0.3)(detections)

    for _, intrvl in enumerate(run_to_finish(person_detections)):
        rv.append((intrvl.bounds, intrvl.jpeg))

    return rv
Example #5
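    # Snippet from a longer script (OUTPUT_DIR, INPUT_NAME and logger are
    # defined elsewhere). It probes the video's metadata, detects people
    # every 15 frames, tracks each detection across the intervening frames,
    # and merges adjacent tracks whose trajectories overlap.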
    os.makedirs(OUTPUT_DIR, exist_ok=True)

    decoder = LocalVideoDecoder(INPUT_NAME)
    frame_count, fps = decoder.frame_count, int(np.round(decoder.fps))
    logger.info(
        f"Video info: frame_count {decoder.frame_count}, fps {decoder.fps}, raw_width {decoder.raw_width}, raw_height {decoder.raw_height}"
    )
    del decoder

    detect_step = 15

    all_frames = VideoToFrames(LocalVideoDecoder(INPUT_NAME))()
    sampled_frames = Slice(step=detect_step)(all_frames)
    detections = Detection('cloudlet031.elijah.cs.cmu.edu', 5000,
                           parallel=3)(sampled_frames)
    crop_persons = DetectionFilterFlatten(['person'], 0.5)(detections)

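    # Fill the gaps between detection frames: track each detected person
    # starting from its box; the per-frame boxes end up in the interval's
    # payload['trajectory'], which the merge predicate below relies on.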
    track_trajectories = TrackFromBox(LRULocalVideoDecoder(INPUT_NAME,
                                                           cache_size=600),
                                      detect_step,
                                      name="track_person",
                                      parallel_workers=32)(crop_persons)

    def trajectory_merge_predicate(i1, i2):
        return meets_before(detect_step*2)(i1, i2) \
            and iou_at_least(0.1)(i1.payload['trajectory'][-1], i2.payload['trajectory'][0])

    def trajectory_payload_merge_op(p1, p2):
        logger.debug(
            f"Merging two trajectories of lengths {len(p1['trajectory'])} and {len(p2['trajectory'])}"
        )
        return {'trajectory': p1['trajectory'] + p2['trajectory']}
Example #6
def query(path, session):
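    """Find pairs of long-lived person trajectories and return clips of each.

    Detects people roughly once per second, tracks them between detection
    frames, coalesces overlapping tracks into trajectories, keeps
    trajectories lasting at least 5 seconds, joins them pairwise within a
    150-frame window, and returns the video metadata plus (bounds, mp4
    bytes) for each joined pair.
    """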
    cv2.setNumThreads(8)
    # session.log('error', f"starting on path {path}. running threads: {len(threading.enumerate())}")

    query_result = {}

    decoder = LocalVideoDecoder(path)
    frame_count, fps = decoder.frame_count, int(np.round(decoder.fps))
    query_result['metadata'] = {
        'fps': fps,
        'frame_count': frame_count,
        'raw_w': decoder.raw_width,
        'raw_h': decoder.raw_height,
    }
    query_result['results'] = list()
    del decoder

    detect_step = int(fps)

    all_frames = VideoToFrames(LocalVideoDecoder(path))()
    sampled_frames = Slice(step=detect_step)(all_frames)
    detections = Detection(server_list=DETECTION_SERVERS,
                           parallel=2)(sampled_frames)
    crop_persons = DetectionFilterFlatten(['person'], 0.5)(detections)

    track_trajectories = TrackFromBox(
        LRULocalVideoDecoder(path, cache_size=900),
        detect_step,
        step=2,
        name="track_person",
    )(crop_persons)

    def trajectory_merge_predicate(i1, i2):
        return meets_before(detect_step*2)(i1, i2) \
            and iou_at_least(0.1)(i1.payload['trajectory'][-1], i2.payload['trajectory'][0])

    def trajectory_payload_merge_op(p1, p2):
        # logger.debug(f"Merging two trajectories of lengths {len(p1['trajectory'])} and {len(p2['trajectory'])}")
        return {'trajectory': p1['trajectory'] + p2['trajectory']}

    coalesced_trajectories = Coalesce(
        predicate=trajectory_merge_predicate,
        bounds_merge_op=Bounds3D.span,
        payload_merge_op=trajectory_payload_merge_op,
        epsilon=3)(track_trajectories)

    long_coalesced_persons = Filter(
        pred_fn=lambda intrvl: intrvl.bounds.length() >= fps * 5
    )(coalesced_trajectories)

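    # Pair up long trajectories with each other: is_pair() and pair_merge_op
    # (defined elsewhere in this script) decide which trajectories within a
    # 150-frame window form a pair and how their bounds/payloads are merged.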
    pairs = JoinWithTimeWindow(
        predicate=is_pair(),
        merge_op=pair_merge_op,
        window=150)(long_coalesced_persons, long_coalesced_persons)

    raw_fg = VideoCropFrameGroup(LRULocalVideoDecoder(path),
                                 copy_payload=True)(pairs)

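    # Materialize the stream: subscribe to the final op, start the operator
    # graph, and encode an mp4 clip for each resulting FrameGroupInterval.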
    output = raw_fg
    output_sub = output.subscribe()
    output.start_thread_recursive()

    for _, intrvl in enumerate(output_sub):
        assert isinstance(intrvl, FrameGroupInterval)
        # Note: RAM usage still grows even when the mp4 is not generated
        query_result['results'].append(
            (intrvl.bounds.copy(), intrvl.get_mp4()))
        del intrvl
        # query_result['results'].append((intrvl.bounds, b''))
        # session.log('error', "Find a pair!")

    return query_result