Example #1
def test_perf_timer_report():
    # multiple calls to _report_once() cause only one report
    log_fn = Mock()
    timer = PerfTimer('foo', log_fn=log_fn)
    with timer:
        pass
    timer._report_once()
    timer._report_once()
    log_fn.assert_called_once()
Example #2
def test_perf_timer_one_run():
    # a single completed run is reported without the ' in N runs' suffix
    log_fn = Mock()
    timer = PerfTimer('foo', log_fn=log_fn)

    with timer:
        pass

    assert timer._count == 1
    timer._report()
    log_fn.assert_called_once_with(_NotContaining(' in '))
Example #3
def test_perf_timer():
    # time_fn is called on enter and exit of each with block
    time_fn = Mock(side_effect=[10, 15, 15, 25])
    log_fn = Mock()
    timer = PerfTimer('foo',
                      observer=AverageObserver,
                      time_fn=time_fn,
                      log_fn=log_fn)

    for _ in range(2):
        with timer:
            pass

    assert timer._count == 2
    assert timer._sum == 15
    assert timer._max == 10
    timer._report()
    log_fn.assert_called_once_with(_Containing('in 2 runs'))
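For reference, here is a minimal sketch of a context-manager timer that is consistent with the assertions above: time_fn is called once on enter and once on exit, and the count, sum and maximum of the measured intervals are accumulated. It also mirrors the non-reentrancy and time_fn=None checks in Examples #7 and #10 below. The attribute and parameter names come from the tests; everything else is an assumption, not the real PerfTimer implementation.

import time


class _SketchPerfTimer:
    # Illustration only, not the real PerfTimer.
    def __init__(self, name, time_fn=time.monotonic, log_fn=print):
        if time_fn is None:
            raise NotImplementedError('a time function is required')
        self._name = name
        self._time_fn = time_fn
        self._log_fn = log_fn
        self._start = None
        self._count = 0
        self._sum = 0.0
        self._max = 0.0

    def __enter__(self):
        if self._start is not None:
            raise RuntimeError(f'{self._name!r} is not reentrant')
        self._start = self._time_fn()            # first call: on enter
        return self

    def __exit__(self, *exc):
        elapsed = self._time_fn() - self._start  # second call: on exit
        self._start = None
        self._count += 1
        self._sum += elapsed
        self._max = max(self._max, elapsed)

    def _report(self):
        if self._count == 1:
            # single run: no ' in N runs' suffix (cf. Example #2)
            self._log_fn(f'{self._name}: {self._sum:.3f}s')
        else:
            self._log_fn(f'{self._name}: {self._sum:.3f}s in {self._count} runs')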
Example #4
def do_warmup(indexer: 'BaseNumpyIndexer', stat: dict):
    # warm up the index with single-vector queries and record the elapsed time
    t = PerfTimer()
    with t:
        for keys, vecs in read_data(QUERY_FEED_PATH,
                                    batch_size=1,
                                    max_docs=100):
            indexer.query(vecs, 1)
    stat[BUILD_TIME_S] = t.interval
    print(f'Took {t.interval} seconds to train and warmup the index')
Example #5
def do_index(indexer: 'BaseNumpyIndexer', batch_size: int, stat: dict):
    # index the full feed in batches and record elapsed time and docs/sec
    t = PerfTimer()
    with t:
        n = 0
        for keys, vecs in read_data(INDEX_FEED_PATH, batch_size):
            indexer.add(keys, vecs)
            n += batch_size
    stat[INDEX_TIME_S] = t.interval
    stat[INDEX_DOCS] = n
    stat[INDEX_DOCS_S] = n / t.interval
    print(
        f'Took {t.interval} seconds to index {n} documents: {n / t.interval} doc/s'
    )
Example #6
def do_query(indexer: 'BaseNumpyIndexer', batch_size: int, top_k: int,
             stat: dict):
    # query the index in batches and record elapsed time and docs/sec
    results = np.empty((0, top_k), 'float32')
    t = PerfTimer()
    with t:
        n = 0
        for keys, vecs in read_data(QUERY_FEED_PATH, batch_size):
            doc_ids, dists = indexer.query(vecs, top_k)
            if doc_ids.shape != (keys.shape[0], ):
                results = np.vstack((results, doc_ids))
            n += batch_size
    stat[QUERY_TIME] = t.interval
    stat[QUERY_DOCS] = n
    stat[QUERY_DOCS_S] = n / t.interval
    print(
        f'Took {t.interval} seconds to query {n} documents: {n / t.interval} doc/s'
    )
    return results
Example #7
def test_perf_timer_non_reentrant():
    # entering the same timer twice raises RuntimeError
    timer = PerfTimer('foo')
    with timer:
        with pytest.raises(RuntimeError):
            with timer:
                pass
Example #8
def test_perf_timer_atexit_is_weak(_report_once):
    # atexit doesn't trigger _report_once() if object already finalized
    timer = PerfTimer('foo')
    del timer
    _impl._atexit()
    assert _report_once.call_count == 1
Example #9
def test_perf_timer_atexit_and_del(_report_once):
    # atexit and del each cause 1 call to _report_once()
    timer = PerfTimer('foo')
    _impl._atexit()
    del timer
    assert _report_once.call_count == 2
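The name of Example #8 suggests the module tracks live timers only weakly, so a timer that has already been garbage-collected is not reported again at interpreter exit, while atexit and __del__ on a live timer each trigger one call. Below is a minimal sketch of such a scheme, assuming weakref bookkeeping and an atexit hook; the real _impl module is not shown in these snippets.

import atexit
import weakref

_live_timers = weakref.WeakSet()  # finalized timers drop out automatically


def _sketch_atexit():
    # report only for timers still alive at interpreter exit, so a timer
    # that was already deleted (Example #8) is not reported here again
    for timer in list(_live_timers):
        timer._report_once()


atexit.register(_sketch_atexit)


class _SketchTimerWithAtexit:
    def __init__(self, name):
        self._name = name
        self._reported = False
        _live_timers.add(self)

    def _report_once(self):
        if not self._reported:
            self._reported = True
            print(f'{self._name}: report')

    def __del__(self):
        # deletion also triggers a (single) report, as in Example #9
        self._report_once()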
Example #10
def test_perf_timer_not_implemented():
    # constructing a timer with time_fn=None raises NotImplementedError
    with pytest.raises(NotImplementedError):
        PerfTimer('foo', time_fn=None)
Example #11
def test_perf_timer_type():
    # since metaclass is used, ensure type is cached
    assert type(PerfTimer('foo')) is type(PerfTimer('bar'))
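The test only pins down that type() is stable across constructions even though a metaclass is involved. One way a metaclass can provide that, shown here as a sketch under that assumption rather than as PerfTimer's actual metaclass, is to build the concrete class once and hand out instances of the cached class afterwards.

class _CachedMeta(type):
    _cache = {}

    def __call__(cls, *args, **kwargs):
        impl = _CachedMeta._cache.get(cls)
        if impl is None:
            # build the concrete class once and reuse it for later calls
            impl = type(f'_{cls.__name__}Impl', (cls,), {})
            _CachedMeta._cache[cls] = impl
        return super(_CachedMeta, impl).__call__(*args, **kwargs)


class _SketchTimerType(metaclass=_CachedMeta):
    def __init__(self, name):
        self.name = name


assert type(_SketchTimerType('foo')) is type(_SketchTimerType('bar'))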