Example #1
import gc

from ot.backend import get_backend_list


def exec_bench(setup, tested_function, param_list, n_runs, warmup_runs):
    backend_list = get_backend_list()
    for i, nx in enumerate(backend_list):
        if nx.__name__ == "tf" and i < len(backend_list) - 1:
            # Tensorflow should be the last one to be benchmarked because
            # as far as I'm aware, there is no way to force it to release
            # GPU memory. Hence, if any other backend is benchmarked after
            # Tensorflow and requires the usage of a GPU, it will not have the
            # full memory available and you may have a GPU Out Of Memory error
            # even though your GPU can technically hold your tensors in memory.
            backend_list.pop(i)
            backend_list.append(nx)
            break

    inputs = [setup(param) for param in param_list]
    results = dict()
    for nx in backend_list:
        for i in range(len(param_list)):
            print(nx, param_list[i])
            args = inputs[i]
            results_nx = nx._bench(tested_function,
                                   *args,
                                   n_runs=n_runs,
                                   warmup_runs=warmup_runs)
            gc.collect()
            results_nx_with_param_in_key = dict()
            for key in results_nx:
                new_key = (param_list[i], *key)
                results_nx_with_param_in_key[new_key] = results_nx[key]
            results.update(results_nx_with_param_in_key)
    return results
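
A minimal usage sketch, assuming hypothetical `bench_setup` and `bench_sinkhorn` helpers built on POT's public API: `setup` prepares the inputs for one problem size and `tested_function` is the routine timed on each backend. These two helpers are illustrative only and not part of the original benchmark code.

import numpy as np

import ot


def bench_setup(n_samples):
    # Two identical uniform distributions on n_samples points and their cost matrix.
    rng = np.random.RandomState(0)
    x = rng.randn(n_samples, 2)
    a = ot.utils.unif(n_samples)
    M = ot.dist(x, x)
    return a, a, M


def bench_sinkhorn(a, b, M):
    # Routine whose runtime is measured on every backend.
    return ot.sinkhorn(a, b, M, reg=1.0)


results = exec_bench(bench_setup, bench_sinkhorn,
                     param_list=[100, 500, 1000],
                     n_runs=10, warmup_runs=2)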
Example #2
"""Tests for module bregman on OT with bregman projections """

# Author: Remi Flamary <*****@*****.**>
#         Kilian Fatras <*****@*****.**>
#
# License: MIT License

import numpy as np
import pytest

import ot
from ot.backend import get_backend_list
from ot.backend import torch

backend_list = get_backend_list()


def test_sinkhorn():
    # test sinkhorn
    n = 100
    rng = np.random.RandomState(0)

    x = rng.randn(n, 2)
    u = ot.utils.unif(n)

    M = ot.dist(x, x)

    G = ot.sinkhorn(u, u, M, 1, stopThr=1e-10)

    # check marginal constraints
    np.testing.assert_allclose(u, G.sum(1), atol=1e-05)
    np.testing.assert_allclose(u, G.sum(0), atol=1e-05)
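
Beyond NumPy, the same marginal check can be repeated on every available backend. The sketch below is not part of the original test file; it assumes the `backend_list` built above and the `from_numpy` / `to_numpy` conversion helpers exposed by each backend object.

def check_sinkhorn_marginals_all_backends():
    n = 100
    rng = np.random.RandomState(0)
    x = rng.randn(n, 2)
    u = ot.utils.unif(n)
    M = ot.dist(x, x)

    for nx in backend_list:
        # convert the NumPy inputs to the backend's tensor type
        ub, Mb = nx.from_numpy(u, M)
        Gb = ot.sinkhorn(ub, ub, Mb, 1, stopThr=1e-10)
        G = nx.to_numpy(Gb)
        # the transport plan must have the prescribed marginals
        np.testing.assert_allclose(u, G.sum(1), atol=1e-05)
        np.testing.assert_allclose(u, G.sum(0), atol=1e-05)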
Example #3
import ot
from ot.backend import get_backend_list


def test_get_backend_list():

    lst = get_backend_list()

    assert len(lst) > 0
    assert isinstance(lst[0], ot.backend.NumpyBackend)
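
A small hedged extension of the test above (not from the original test suite): when PyTorch is installed, the backend list should also contain a `TorchBackend` instance. Here `torch` is the module handle re-exported by `ot.backend`, which evaluates falsy when PyTorch is unavailable.

import pytest

from ot.backend import torch


def test_backend_list_contains_torch():
    if not torch:
        pytest.skip("PyTorch is not installed")
    lst = get_backend_list()
    assert any(isinstance(nx, ot.backend.TorchBackend) for nx in lst)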