def run_bench(repeat=5, verbose=False):
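    # Compare several regressors (XGB, LGB, SVR, RF, MLP, ...) on three
    # datasets, with and without normalisation, for batch sizes N from 1 to
    # 50000; results are gathered into one pandas DataFrame.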

    pbefore = dict(dim=[-1],
                   model=list(
                       sorted([
                           'XGB', 'LGB', 'SVR', 'NuSVR', 'RF', 'DT', 'ADA',
                           'MLP', 'LR', 'GBT', 'KNN', 'KNN-cdist', 'HGB'
                       ])),
                   norm=[False, True],
                   dataset=["boston", "diabetes", "rndbin100"])
    pafter = dict(N=[
        1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 20000,
        50000
    ])

    test = lambda dim=None, **opts: DatasetsOrtBenchPerfTest(**opts)
    bp = BenchPerf(pbefore, pafter, test)

    with sklearn.config_context(assume_finite=True):
        start = time()
        results = list(
            bp.enumerate_run_benchs(repeat=repeat,
                                    verbose=verbose,
                                    stop_if_error=False))
        end = time()

    results_df = pandas.DataFrame(results)
    print("Total time = %0.3f sec\n" % (end - start))
    return results_df
def run_bench(repeat=10, verbose=False):
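    # Benchmark GaussianProcessRegressor through OnnxRuntimeBenchPerfTestRegression:
    # grid over input dimension, alpha, dtype (float32/float64) and the optional
    # 'cdist' conversion option, for batch sizes N in {1, 10, 100, 1000}, N_fit=100.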

    pbefore = dict(
        dim=[1, 5, 10, 20],
        alpha=[0.1, 1., 10.],
        onnx_options=[None, {
            GaussianProcessRegressor: {
                'optim': 'cdist'
            }
        }],
        dtype=[numpy.float32, numpy.float64])
    pafter = dict(N=[1, 10, 100, 1000])

    test = lambda dim=None, **opts: OnnxRuntimeBenchPerfTestRegression(
        GaussianProcessRegressor, dim=dim, N_fit=100, **opts)
    bp = BenchPerf(pbefore, pafter, test)

    with sklearn.config_context(assume_finite=True):
        start = time()
        results = list(
            bp.enumerate_run_benchs(repeat=repeat,
                                    verbose=verbose,
                                    stop_if_error=False))
        end = time()

    results_df = pandas.DataFrame(results)
    print("Total time = %0.3f sec\n" % (end - start))
    return results_df
Example #3
def run_bench(repeat=10, verbose=False):
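    # Benchmark KNeighborsClassifier as a binary classifier: grid over
    # n_neighbors, leaf_size, dimension and conversion options ('cdist'
    # optimisation, zipmap disabled), euclidean metric, N in {1, 10, 100}.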

    pbefore = dict(n_neighbors=[2, 5],
                   leaf_size=[10],
                   dim=[2, 5],
                   onnx_options=[
                       None, {
                           KNeighborsClassifier: {
                               'optim': 'cdist',
                               'zipmap': False
                           }
                       }
                   ],
                   metric=["euclidean"])
    pafter = dict(N=[1, 10, 100])

    test = lambda dim=None, **opts: OnnxRuntimeBenchPerfTestBinaryClassification(
        KNeighborsClassifier, dim=dim, **opts)
    bp = BenchPerf(pbefore, pafter, test)

    with sklearn.config_context(assume_finite=True):
        start = time()
        results = list(
            bp.enumerate_run_benchs(repeat=repeat,
                                    verbose=verbose,
                                    stop_if_error=False))
        end = time()

    results_df = pandas.DataFrame(results)
    print("Total time = %0.3f sec\n" % (end - start))
    return results_df
Example #4
def run_bench(repeat=10, verbose=False):
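    # Benchmark RandomForestClassifier: grid over dimension, max_depth and
    # n_estimators (1 to 10000 trees), zipmap disabled in the conversion
    # options, batch sizes N in {1, 10, 100}.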

    pbefore = dict(dim=[1, 5, 10, 100],
                   max_depth=[2, 10],
                   n_estimators=[1, 10, 100, 1000, 10000],
                   onnx_options=[{
                       RandomForestClassifier: {
                           'zipmap': False
                       }
                   }])
    pafter = dict(N=[1, 10, 100])

    test = lambda dim=None, **opts: OnnxRuntimeBenchPerfTestBinaryClassification(
        RandomForestClassifier, dim=dim, **opts)
    bp = BenchPerf(pbefore, pafter, test)

    with sklearn.config_context(assume_finite=True):
        start = time()
        results = list(
            bp.enumerate_run_benchs(repeat=repeat,
                                    verbose=verbose,
                                    stop_if_error=False))
        end = time()

    results_df = pandas.DataFrame(results)
    print("Total time = %0.3f sec\n" % (end - start))
    return results_df
def run_bench(repeat=100, verbose=False):
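    # Minimal benchmark of Re2Bench over a few dimensions and batch sizes;
    # no timing is printed, the raw measurements are returned as a DataFrame.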
    pbefore = dict(dim=[2, 5, 10])
    pafter = dict(N=[1, 10, 100])

    bp = BenchPerf(pbefore, pafter, Re2Bench)

    results = list(bp.enumerate_run_benchs(repeat=repeat, verbose=verbose))
    results_df = pandas.DataFrame(results)
    return results_df
Example #6
def run_bench(repeat=100, verbose=False):
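    # Benchmark PolyBenchPerfTest for dimensions 5, 10, 50 and batch sizes
    # 10, 100, 1000, timing the whole run.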
    pbefore = dict(dim=[5, 10, 50])
    pafter = dict(N=[10, 100, 1000])
    bp = BenchPerf(pbefore, pafter, PolyBenchPerfTest)

    with sklearn.config_context(assume_finite=True):
        start = time()
        results = list(bp.enumerate_run_benchs(repeat=repeat, verbose=verbose))
        end = time()

    results_df = pandas.DataFrame(results)
    print("Total time = %0.3f sec\n" % (end - start))
    return results_df
Example #7
    def test_perf_onnxruntime_logreg_fails(self):
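        # Run the logistic-regression benchmark with a test class expected to
        # fail on every configuration: the sum of the 'error_c' column must
        # equal the number of rows, and the results are still written to CSV.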
        res = onnxruntime_perf_binary_classifiers(MyBenchTest)[0]

        bp = BenchPerf(res['pbefore'], res['pafter'], res['fct'])
        results = list(
            bp.enumerate_run_benchs(repeat=10,
                                    verbose=True,
                                    stop_if_error=False))
        results_df = pandas.DataFrame(results)
        su = results_df['error_c'].sum()
        self.assertEqual(su, results_df.shape[0])
        temp = get_temp_folder(__file__, "temp_perf_onnxruntime_logreg_fails")
        out = os.path.join(temp, "onnxruntime_logreg.perf.csv")
        results_df.to_csv(out, index=False)
        self.assertExists(out)
        text = str(bp)
        self.assertIn("BenchPerf(", text)
def run_bench(repeat=10, verbose=False):
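    # Benchmark LogisticRegression through
    # OnnxRuntimeBenchPerfTestBinaryClassification3: small grid (dim 1 or 5,
    # fit_intercept=True), batch sizes 1, 10, 100.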

    pbefore = dict(dim=[1, 5], fit_intercept=[True])
    pafter = dict(N=[1, 10, 100])
    test = lambda dim=None, **opts: (
        OnnxRuntimeBenchPerfTestBinaryClassification3(
            LogisticRegression, dim=dim, **opts))
    bp = BenchPerf(pbefore, pafter, test)

    with sklearn.config_context(assume_finite=True):
        start = time()
        results = list(bp.enumerate_run_benchs(repeat=repeat, verbose=verbose))
        end = time()

    results_df = pandas.DataFrame(results)
    print("Total time = %0.3f sec\n" % (end - start))
    return results_df
Example #9
def run_bench(repeat=10, verbose=False):
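    # Benchmark DecisionTreeRegressor: dimensions from 1 to 200, max_depth
    # 3/10/20, batch sizes from 1 to 10000; errors do not stop the run.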

    pbefore = dict(dim=[1, 5, 10, 20, 50, 100, 200],
                   max_depth=[3, 10, 20])
    pafter = dict(N=[1, 10, 100, 1000, 10000])

    test = lambda dim=None, **opts: OnnxRuntimeBenchPerfTestRegression(
        DecisionTreeRegressor, dim=dim, **opts)
    bp = BenchPerf(pbefore, pafter, test)

    with sklearn.config_context(assume_finite=True):
        start = time()
        results = list(bp.enumerate_run_benchs(repeat=repeat, verbose=verbose,
                                               stop_if_error=False))
        end = time()

    results_df = pandas.DataFrame(results)
    print("Total time = %0.3f sec\n" % (end - start))
    return results_df
Example #10
    def test_perf_onnxruntime_logreg(self):
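        # Run the first ONNX Runtime logistic-regression benchmark, save the
        # measurements to CSV, then dump machine and package information
        # (sklearn, numpy, onnxruntime, ...) next to it.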
        res = onnxruntime_perf_binary_classifiers()[0]

        bp = BenchPerf(res['pbefore'], res['pafter'], res['fct'])
        results = list(bp.enumerate_run_benchs(repeat=10, verbose=True))
        results_df = pandas.DataFrame(results)
        temp = get_temp_folder(__file__, "temp_perf_onnxruntime_logreg")
        out = os.path.join(temp, "onnxruntime_logreg.perf.csv")
        results_df.to_csv(out, index=False)
        self.assertExists(out)

        subset = {
            'sklearn', 'numpy', 'pandas', 'onnxruntime', 'skl2onnx',
            'mlprodict'
        }

        df = pandas.DataFrame(machine_information(subset))
        out = os.path.join(temp, "onnxruntime_logreg.time.csv")
        df.to_csv(out, index=False)
        self.assertExists(out)
Example #11
def run_bench(repeat=20, number=10, verbose=False):
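    # Benchmark GraphORtBenchPerfTest: grid over input dimension and nbnode
    # (1 to 300 nodes in the graph), batch sizes up to 10000, forwarding both
    # repeat and number to the runner.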

    pbefore = dict(dim=[1, 10, 100],
                   nbnode=[1, 2, 3, 5, 10, 50, 100, 150, 200, 250, 300])
    pafter = dict(N=[1, 10, 100, 1000, 10000])

    test = lambda dim=None, **opts: GraphORtBenchPerfTest(dim=dim, **opts)
    bp = BenchPerf(pbefore, pafter, test)

    with sklearn.config_context(assume_finite=True):
        start = time()
        results = list(
            bp.enumerate_run_benchs(repeat=repeat,
                                    verbose=verbose,
                                    number=number,
                                    stop_if_error=False))
        end = time()

    results_df = pandas.DataFrame(results)
    print("Total time = %0.3f sec\n" % (end - start))
    return results_df
def run_bench(repeat=20, number=10, verbose=False):
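    # Benchmark GraphOrtBenchPerfTest over input shapes (edims) and axes;
    # fct_filter_test (defined elsewhere) discards unwanted combinations
    # before they are run.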

    pbefore = dict(edims=[(10, 10), (100, 50), (50, 20, 5)],
                   axes=[(1, ), (2, )])
    pafter = dict(N=[1, 10, 100, 1000, 2000, 5000])

    test = lambda edims=None, axes=None, **opts: GraphOrtBenchPerfTest(
        edims=edims, axes=axes, **opts)
    bp = BenchPerf(pbefore, pafter, test, filter_test=fct_filter_test)

    with sklearn.config_context(assume_finite=True):
        start = time()
        results = list(
            bp.enumerate_run_benchs(repeat=repeat,
                                    verbose=verbose,
                                    number=number,
                                    stop_if_error=False))
        end = time()

    results_df = pandas.DataFrame(results)
    print("Total time = %0.3f sec\n" % (end - start))
    return results_df
Example #13
def run_bench(repeat=10, verbose=False):
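    # Benchmark MLPClassifier over several layer sizes and activations, with
    # two cProfile profilers attached to the first parameter combination;
    # returns both the measurements and the profilers.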

    pbefore = dict(hidden_layer_sizes=[(2, ), (10, ), (20, ), (2, 2), (10, 2),
                                       (20, 2)],
                   activation=['relu', 'logistic'],
                   dim=[2, 5, 10],
                   onnx_options=[{
                       MLPClassifier: {
                           'zipmap': False
                       }
                   }])
    pafter = dict(N=[1, 10, 100, 1000])

    merged = {}
    merged.update(pbefore)
    merged.update(pafter)
    d0 = {k: v[0] for k, v in merged.items()}

    profilers = [
        ProfilerCall(d0, module="cProfile"),
        ProfilerCall(d0, module="cProfile")
    ]

    test = lambda dim=None, **opts: OnnxRuntimeBenchPerfTestBinaryClassification(
        MLPClassifier, dim=dim, **opts)

    bp = BenchPerf(pbefore, pafter, test, profilers=profilers)

    with sklearn.config_context(assume_finite=True):
        start = time()
        results = list(
            bp.enumerate_run_benchs(repeat=repeat,
                                    verbose=verbose,
                                    stop_if_error=False))
        end = time()

    results_df = pandas.DataFrame(results)
    print("Total time = %0.3f sec\n" % (end - start))
    return results_df, profilers
Example #14
def run_bench(repeat=10, verbose=False):
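    # Compare libraries ('sklh', 'skl', 'xgb', 'lgb') on three datasets
    # through LibOrtBenchPerfTest, for batch sizes from 1 to 50000.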

    pbefore = dict(dim=[-1],
                   lib=['sklh', 'skl', 'xgb', 'lgb'],
                   dataset=["breast_cancer", "digits", "rndbin100"])
    pafter = dict(N=[
        1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 20000,
        50000
    ])

    test = lambda dim=None, **opts: LibOrtBenchPerfTest(**opts)
    bp = BenchPerf(pbefore, pafter, test)

    with sklearn.config_context(assume_finite=True):
        start = time()
        results = list(
            bp.enumerate_run_benchs(repeat=repeat,
                                    verbose=verbose,
                                    stop_if_error=False))
        end = time()

    results_df = pandas.DataFrame(results)
    print("Total time = %0.3f sec\n" % (end - start))
    return results_df
Example #15
def run_bench(repeat=25,
              number=20,
              verbose=False,
              fixed_dim=None,
              skl_model=None,
              model_onnx=None):
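    # Benchmark a single already-fitted model (skl_model) against its ONNX
    # counterpart (model_onnx) for a fixed input dimension, profiling the
    # 'skl_proba' and 'onnx_proba' methods for N=1 and N=10.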

    pbefore = dict(dim=[fixed_dim],
                   fixed_dim=[fixed_dim],
                   skl_model=[skl_model],
                   model_onnx=[model_onnx])
    pafter = dict(
        N=[1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000])

    profilers = [
        ProfilerCall(dict(N=1, method="skl_proba"), module="cProfile"),
        ProfilerCall(dict(N=1, method="onnx_proba"), module="cProfile"),
        ProfilerCall(dict(N=10, method="skl_proba")),
        ProfilerCall(dict(N=10, method="onnx_proba")),
    ]

    test = lambda dim=None, **opts: GraphORtBenchPerfTest(**opts)
    bp = BenchPerf(pbefore, pafter, test, profilers=profilers)

    with sklearn.config_context(assume_finite=True):
        start = time()
        results = list(
            bp.enumerate_run_benchs(repeat=repeat,
                                    verbose=verbose,
                                    number=number,
                                    stop_if_error=False))
        end = time()

    results_df = pandas.DataFrame(results)
    print("Total time = %0.3f sec\n" % (end - start))
    return results_df, profilers
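The fragments above omit their imports. A minimal driver is sketched below; the pymlbenchmark import path is an assumption inferred from the identifiers used in the snippets (BenchPerf, ProfilerCall) and is not shown in the listing, so adjust it to the actual package layout.

# Hypothetical driver for one of the run_bench variants above.
# The pymlbenchmark import path is an assumption, not confirmed by the listing.
from time import time

import numpy
import pandas
import sklearn
from pymlbenchmark.benchmark import BenchPerf, ProfilerCall  # assumed path

if __name__ == "__main__":
    # Run a variant that returns a single DataFrame and persist the results.
    df = run_bench(verbose=True)
    df.to_csv("bench_results.csv", index=False)
    print(df.head())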