Example #1
    def __call__(self, obj):
        gc.collect()
        # get obj data
        tic = time.time()
        _ = obj.data
        ipc_time = time.time() - tic

        # save obj to tempfile
        tic = time.time()
        tmpf = tempfile.NamedTemporaryFile('wb',
                                           suffix='.mp4',
                                           prefix='STSearchFilter-',
                                           delete=False)
        tmpf.write(obj.data)
        tmpf.close()
        save_time = time.time() - tic

        # init and execute query, buffer all results
        tic = time.time()
        self.session.log('info', f"starting query() working on {tmpf.name}")

        # run the query in a child process to work around memory leaks in OpenCV
        def child_f(path, query_fn, conn):
            conn.send(query_fn(path, None))
            conn.close()

        parent_conn, child_conn = multiprocessing.Pipe()
        p = multiprocessing.Process(target=child_f,
                                    args=(tmpf.name, self.query_fn,
                                          child_conn))
        p.start()
        # recv() before join(): drain the pipe first; a large result could
        # otherwise fill the pipe buffer and deadlock the child
        query_result = parent_conn.recv()
        p.join()
        # in-process alternative (leaks memory through OpenCV):
        # query_result = self.query_fn(tmpf.name, session=self.session)
        self.session.log('info', f"query() done on {tmpf.name}")
        query_time = time.time() - tic

        # delete tempfile
        os.unlink(tmpf.name)

        tic = time.time()
        query_result_serialized = pickle.dumps(query_result)
        pickle_time = time.time() - tic

        msg = STSearchResult()
        msg.query_result = query_result_serialized
        msg.stats.update({
            'input_size': float(len(obj.data)),
            'ipc_time': ipc_time,
            'save_time': save_time,
            'query_time': query_time,
            'pickle_time': pickle_time
        })

        obj.set_binary(self.output_attr, msg.SerializeToString())
        return True
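# NOTE (editorial sketch, standard library only): the Pipe/Process dance above
# is the load-bearing trick: OpenCV can leak memory across calls, so the query
# runs in a throwaway child process and only the pickled result crosses the
# pipe. The same pattern in isolation; `run_isolated` is an illustrative name,
# not part of the original code.

import multiprocessing

def _child(fn, arg, conn):
    # runs in the child: compute, ship the result back, exit
    conn.send(fn(arg))
    conn.close()

def run_isolated(fn, arg):
    """Run fn(arg) in a child process so any leaked memory dies with it."""
    parent_conn, child_conn = multiprocessing.Pipe()
    p = multiprocessing.Process(target=_child, args=(fn, arg, child_conn))
    p.start()
    result = parent_conn.recv()  # recv before join: a large result could
    p.join()                     # otherwise fill the pipe and deadlock
    return result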
Example #2
def main(mode, path=None, cache_dir='okutama_cache'):
    assert mode in ('remote', 'local')

    if mode == 'remote':
        import json
        from pathlib import Path
        import pickle
        import time

        from stsearch.diamond_wrap.result_pb2 import STSearchResult
        from stsearch.diamond_wrap.utils import start_stsearch_by_script, OUTPUT_ATTR

        tic = time.time()

        results = start_stsearch_by_script(open(__file__, 'rb').read())

        for i, res in enumerate(results):
            # each `res` corresponds to results of a clip_id
            object_id = res['_ObjectID'].decode()
            clip_id = Path(object_id).stem
            print(
                f"=> Result {i}. Time {(time.time()-tic)/60:.1f} min. Clip {clip_id}"
            )

            filter_result: STSearchResult = STSearchResult()
            filter_result.ParseFromString(res[OUTPUT_ATTR])
            query_result = pickle.loads(filter_result.query_result)
            query_result['clip_id'] = clip_id

            with open(
                    Path(cache_dir) / f"{query_result['file_digest']}.json",
                    'wt') as f:
                json.dump(query_result, f, indent=2)

    elif mode == 'local':
        from pathlib import Path
        assert path is not None

        query_result = query(path, session=None)
        query_result['clip_id'] = Path(path).stem
        logger.info(query_result)
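# NOTE (editorial sketch, not in the original): the remote branch caches one
# JSON file per clip under cache_dir, keyed by file digest; a hypothetical
# helper to read them back could look like this.

import json
from pathlib import Path

def load_cached_results(cache_dir='okutama_cache'):
    """Load every cached query_result dict written by main('remote')."""
    return [json.loads(p.read_text()) for p in Path(cache_dir).glob('*.json')]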
Example #3
def main(output="okutama_metadata.csv"):
    from pathlib import Path
    import pickle
    import pandas as pd
    from stsearch.diamond_wrap.result_pb2 import STSearchResult
    from stsearch.diamond_wrap.utils import start_stsearch_by_script, OUTPUT_ATTR

    results = start_stsearch_by_script(open(__file__, 'rb').read())

    total_frames = 0
    total_hrs = 0
    save_results = []

    for i, res in enumerate(results):
        object_id = res['_ObjectID'].decode()
        clip_id = Path(object_id).stem

        filter_result: STSearchResult = STSearchResult()
        filter_result.ParseFromString(res[OUTPUT_ATTR])
        query_result = pickle.loads(filter_result.query_result)
        print(f"{clip_id}, {query_result}")
        total_frames += query_result['frame_count']
        total_hrs += query_result['frame_count'] / query_result['fps'] / 3600

        save_results.append(
            {
                'clip_id': clip_id,
                'frame_count': query_result['frame_count'],
                'fps': query_result['fps'],
                'height': query_result['height'],
                'width': query_result['width']
            }
        )

        # checkpoint the CSV after every clip so partial results survive interruption
        pd.DataFrame(save_results).to_csv(output)

    print(f"total_hrs: {total_hrs:.2f}. Total frames: {total_frames}. Count: {i + 1}")
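# NOTE (editorial sketch): the CSV written above can be re-aggregated with
# pandas; column names follow the save_results dicts in the loop, and the
# path is the function's default output.

import pandas as pd

df = pd.read_csv("okutama_metadata.csv", index_col=0)
df['hours'] = df['frame_count'] / df['fps'] / 3600
print(df[['clip_id', 'frame_count', 'fps', 'hours']])
print(f"total: {df['hours'].sum():.2f} hrs over {len(df)} clips")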
Example #4
def main(mode,
         path=None,
         result_file="person_and_object_result.csv",
         get_mp4=True,
         mp4_dir="person_and_object_mp4"):
    assert mode in ('remote', 'local')

    if mode == 'remote':
        from pathlib import Path
        import pickle
        import time

        import pandas as pd
        from stsearch.diamond_wrap.result_pb2 import STSearchResult
        from stsearch.diamond_wrap.utils import start_stsearch_by_script, OUTPUT_ATTR

        tic = time.time()

        results = start_stsearch_by_script(open(__file__, 'rb').read())

        Path(mp4_dir).mkdir(parents=True, exist_ok=True)  # ensure output dir exists
        save_results = []

        for i, res in enumerate(results):
            # each `res` corresponds to results of a clip_id
            object_id = res['_ObjectID'].decode()
            clip_id = Path(object_id).stem
            print(
                f"=> Result {i}. Time {(time.time()-tic)/60:.1f} min. Clip {clip_id}"
            )

            filter_result: STSearchResult = STSearchResult()
            filter_result.ParseFromString(res[OUTPUT_ATTR])
            query_result = pickle.loads(filter_result.query_result)
            metadata = query_result['metadata']

            for seq, (b, mp4) in enumerate(query_result['results']):
                if get_mp4 and len(mp4) > 0:
                    with open(
                            f"{mp4_dir}/{clip_id}_{seq}_{b['t1']}_{b['t2']}.mp4",
                            'wb') as f:
                        f.write(mp4)
                        logger.info(f"saved {f.name}")

                save_results.append({
                    'clip_id': clip_id,
                    't1': b['t1'],
                    't2': b['t2'],
                    'x1': b['x1'],
                    'x2': b['x2'],
                    'y1': b['y1'],
                    'y2': b['y2'],
                    'result_size': len(mp4),
                    'frame_count': metadata['frame_count'],
                    'fps': metadata['fps'],
                    'width': metadata['width'],
                    'height': metadata['height'],
                })

            logger.info(f"# results = {len(query_result['results'])}")
            del query_result['results']
            logger.info(query_result)

            pd.DataFrame(save_results).to_csv(result_file)

    elif mode == 'local':
        from pathlib import Path

        assert path is not None
        global OKUTAMA_CACHE_DIR
        OKUTAMA_CACHE_DIR = "/home/zf/video-analytics/stsearch/okutama_experiment/okutama_cache"
        Path(mp4_dir).mkdir(parents=True, exist_ok=True)  # ensure output dir exists
        query_result = query(path, session=None)
        clip_id = Path(path).stem
        for seq, (b, mp4) in enumerate(query_result['results']):
            if get_mp4 and len(mp4) > 0:
                with open(f"{mp4_dir}/{clip_id}_{seq}_{b['t1']}_{b['t2']}.mp4",
                          'wb') as f:
                    f.write(mp4)
                    logger.info(f"saved {f.name}")

        logger.info(f"# results = {len(query_result['results'])}")
        del query_result['results']
        logger.info(query_result)
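# NOTE (editorial sketch): the detection CSV written by the remote branch can
# be filtered for intervals that actually produced a clip; column names match
# the save_results dicts above.

import pandas as pd

df = pd.read_csv("person_and_object_result.csv", index_col=0)
hits = df[df['result_size'] > 0]
print(hits.groupby('clip_id').size().sort_values(ascending=False))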
Example #5
if __name__ == "__main__":
    import json
    from pathlib import Path
    import pickle
    import time

    from stsearch.diamond_wrap.result_pb2 import STSearchResult
    from utils import start_stsearch_by_script, OUTPUT_ATTR

    RESULT_DIR = "cache"
    Path(RESULT_DIR).mkdir(parents=True, exist_ok=True)  # ensure cache dir exists

    tic = time.time()

    results = start_stsearch_by_script(open(__file__, 'rb').read())

    for i, res in enumerate(results):
        logger.info(f"=> Result {i}. Time {(time.time()-tic)/60:.1f} min.")
        object_id = res['_ObjectID'].decode()
        clip_id = Path(object_id).stem

        filter_result: STSearchResult = STSearchResult()
        filter_result.ParseFromString(res[OUTPUT_ATTR])
        query_result = pickle.loads(filter_result.query_result)
        digest = query_result['file_digest']
        query_result['clip_id'] = clip_id

        with open(f"{RESULT_DIR}/{digest}.json", 'wt') as fout:
            json.dump(query_result, fout, indent=2)
Example #6
    code_blob = Blob(
        open(get_default_code_path("fil_stsearch.py"), 'rb').read())
    fil_stsearch_spec = FilterSpec(name="fil-stsearch",
                                   code=code_blob,
                                   arguments=(OUTPUT_ATTR, 'script'),
                                   blob_argument=script_blob)

    search = DiamondSearch(
        get_default_scopecookies(),
        [fil_stsearch_spec],
        push_attrs=[OUTPUT_ATTR],
    )
    search_id = search.start()
    for i, res in enumerate(search.results):
        object_id = res['_ObjectID'].decode()

        filter_result = STSearchResult()
        filter_result.ParseFromString(res[OUTPUT_ATTR])
        query_result = pickle.loads(filter_result.query_result)
        print(f"{object_id}, {filter_result.stats}, {len(query_result)}")

        for k, (bound, blob, ext) in enumerate(query_result):
            # print(object_id, k, bound, len(blob), ext)
            with open(f"{pathlib.Path(object_id).stem}-{k}.{ext}", 'wb') as f:
                f.write(blob)

    stats = search.get_stats()
    search.close()
    print(stats)
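# NOTE (editorial sketch; `summarize_stats` is an illustrative name, not part
# of the original): the per-object timings set by the filter in Example #1
# travel in filter_result.stats, so collecting dict(filter_result.stats) for
# each result inside the loop above allows a summary like this.

import pandas as pd

def summarize_stats(stats_dicts):
    """stats_dicts: one dict(filter_result.stats) per result object."""
    df = pd.DataFrame(stats_dicts)
    return df[['input_size', 'ipc_time', 'save_time',
               'query_time', 'pickle_time']].describe()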