Example #1
def main(mode, path=None, cache_dir='okutama_cache'):
    assert mode in ('remote', 'local')

    if mode == 'remote':
        from pathlib import Path
        import json
        import pickle
        import time

        from stsearch.diamond_wrap.result_pb2 import STSearchResult
        from stsearch.diamond_wrap.utils import start_stsearch_by_script, OUTPUT_ATTR

        # ensure the cache directory exists before writing per-clip JSON
        Path(cache_dir).mkdir(parents=True, exist_ok=True)
        tic = time.time()

        # ship this very script to the remote STSearch service for execution
        results = start_stsearch_by_script(open(__file__, 'rb').read())

        for i, res in enumerate(results):
            # each `res` corresponds to results of a clip_id
            object_id = res['_ObjectID'].decode()
            clip_id = Path(object_id).stem
            print(
                f"=> Result {i}. Time {(time.time()-tic)/60:.1f} min. Clip {clip_id}"
            )

            filter_result: STSearchResult = STSearchResult()
            filter_result.ParseFromString(res[OUTPUT_ATTR])
            query_result = pickle.loads(filter_result.query_result)
            query_result['clip_id'] = clip_id

            # cache the per-clip result as JSON, keyed by the source file's digest
            with open(
                    Path(cache_dir) / f"{query_result['file_digest']}.json",
                    'wt') as f:
                json.dump(query_result, f, indent=2)

    elif mode == 'local':
        from pathlib import Path
        assert path is not None

        # `query` and `logger` are module-level names in the full script
        query_result = query(path, session=None)
        clip_id = Path(path).stem
        logger.info(query_result)
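
A minimal sketch (not part of the original script) of reading back the per-clip JSON files this query caches; it assumes the default cache_dir='okutama_cache' and the 'clip_id' field set above:

import json
from pathlib import Path

# collect every cached per-clip result into one dict keyed by clip_id
cached = {}
for p in sorted(Path('okutama_cache').glob('*.json')):
    with open(p) as f:
        result = json.load(f)
    cached[result['clip_id']] = result

print(f"loaded {len(cached)} cached clip results")
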
Example #2
def main(output="okutama_metadata.csv"):
    from pathlib import Path
    import pickle
    import pandas as pd
    from stsearch.diamond_wrap.result_pb2 import STSearchResult
    from stsearch.diamond_wrap.utils import start_stsearch_by_script, OUTPUT_ATTR

    # ship this very script to the remote STSearch service for execution
    results = start_stsearch_by_script(open(__file__, 'rb').read())

    total_frames = 0
    total_hrs = 0
    save_results = []

    for i, res in enumerate(results):
        object_id = res['_ObjectID'].decode()
        clip_id = Path(object_id).stem

        filter_result: STSearchResult = STSearchResult()
        filter_result.ParseFromString(res[OUTPUT_ATTR])
        query_result = pickle.loads(filter_result.query_result)
        print(f"{clip_id}, {query_result}")
        total_frames += query_result['frame_count']
        total_hrs += query_result['frame_count'] / query_result['fps'] / 3600

        save_results.append(
            {
                'clip_id': clip_id,
                'frame_count': query_result['frame_count'],
                'fps': query_result['fps'],
                'height': query_result['height'],
                'width': query_result['width']
            }
        )

        # rewrite the CSV after every clip so partial results survive interruption
        pd.DataFrame(save_results).to_csv(output)

    print(f"total_hrs: {total_hrs}. Total frames: {total_frames}. Count: {i + 1}")
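
A quick sketch (assumed, not from the script) of consuming the metadata CSV written above; the column names match the dicts appended to save_results, and okutama_metadata.csv is the default output path:

import pandas as pd

df = pd.read_csv("okutama_metadata.csv")
# total footage length in hours, derived the same way as inside the loop above
total_hrs = (df['frame_count'] / df['fps']).sum() / 3600
print(f"{len(df)} clips, {total_hrs:.2f} hours of footage")
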
Example #3
def main(mode,
         path=None,
         result_file="person_and_object_result.csv",
         get_mp4=True,
         mp4_dir="person_and_object_mp4"):
    assert mode in ('remote', 'local')

    if mode == 'remote':
        from pathlib import Path
        import pickle
        import time

        import pandas as pd
        from stsearch.diamond_wrap.result_pb2 import STSearchResult
        from stsearch.diamond_wrap.utils import start_stsearch_by_script, OUTPUT_ATTR

        # make sure the clip output directory exists before writing any mp4s
        Path(mp4_dir).mkdir(parents=True, exist_ok=True)

        tic = time.time()

        # ship this very script to the remote STSearch service for execution
        results = start_stsearch_by_script(open(__file__, 'rb').read())

        save_results = []

        for i, res in enumerate(results):
            # each `res` corresponds to results of a clip_id
            object_id = res['_ObjectID'].decode()
            clip_id = Path(object_id).stem
            print(
                f"=> Result {i}. Time {(time.time()-tic)/60:.1f} min. Clip {clip_id}"
            )

            filter_result: STSearchResult = STSearchResult()
            filter_result.ParseFromString(res[OUTPUT_ATTR])
            query_result = pickle.loads(filter_result.query_result)
            metadata = query_result['metadata']

            # write each returned spatio-temporal segment out as its own mp4
            for seq, (b, mp4) in enumerate(query_result['results']):
                if len(mp4) > 0:
                    with open(
                            f"{mp4_dir}/{clip_id}_{seq}_{b['t1']}_{b['t2']}.mp4",
                            'wb') as f:
                        f.write(mp4)
                        logger.info(f"saved {f.name}")

                save_results.append({
                    'clip_id': clip_id,
                    't1': b['t1'],
                    't2': b['t2'],
                    'x1': b['x1'],
                    'x2': b['x2'],
                    'y1': b['y1'],
                    'y2': b['y2'],
                    'result_size': len(mp4),
                    'frame_count': metadata['frame_count'],
                    'fps': metadata['fps'],
                    'width': metadata['width'],
                    'height': metadata['height'],
                })

            logger.info(f"# results = {len(query_result['results'])}")
            del query_result['results']
            logger.info(query_result)

            # rewrite the CSV after every clip so partial results survive interruption
            pd.DataFrame(save_results).to_csv(result_file)

    elif mode == 'local':
        from pathlib import Path

        assert path is not None
        # point the module-level cache at a local copy for offline runs
        global OKUTAMA_CACHE_DIR
        OKUTAMA_CACHE_DIR = "/home/zf/video-analytics/stsearch/okutama_experiment/okutama_cache"
        # make sure the clip output directory exists before writing any mp4s
        Path(mp4_dir).mkdir(parents=True, exist_ok=True)
        query_result = query(path, session=None)
        clip_id = Path(path).stem
        for seq, (b, mp4) in enumerate(query_result['results']):
            if len(mp4) > 0:
                with open(f"{mp4_dir}/{clip_id}_{seq}_{b['t1']}_{b['t2']}.mp4",
                          'wb') as f:
                    f.write(mp4)
                    logger.info(f"saved {f.name}")

        logger.info(f"# results = {len(query_result['results'])}")
        del query_result['results']
        logger.info(query_result)
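
A minimal sketch (an assumption, not from the original code) of post-processing the detection CSV written above; the column names follow the save_results dicts, and person_and_object_result.csv is the default result_file:

import pandas as pd

df = pd.read_csv("person_and_object_result.csv")
# keep only detections for which an mp4 segment was actually produced
hits = df[df['result_size'] > 0]
print(f"{len(hits)} / {len(df)} detections have a saved clip")
# detections per clip, useful for spotting clips dense with person-object events
print(hits.groupby('clip_id').size().sort_values(ascending=False))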