Example n. 1
    def __init__(self,
                 estimator,
                 dim=None,
                 N_fit=100000,
                 runtimes=('python_compiled', 'onnxruntime1'),
                 onnx_options=None,
                 dtype=numpy.float32,
                 **opts):
        """
        @param      estimator       estimator class
        @param      dim             number of features
        @param      N_fit           number of observations to fit an estimator
        @param      runtimes        runtimes to test for class :epkg:`OnnxInference`
        @param      onnx_options    ONNX conversion options
        @param      dtype           dtype (float32 or float64)
        @param      opts            training settings
        """
        # These libraries are optional.
        from skl2onnx import to_onnx  # pylint: disable=E0401,C0415
        from skl2onnx.common.data_types import FloatTensorType, DoubleTensorType  # pylint: disable=E0401,C0415

        if dim is None:
            raise RuntimeError(  # pragma: no cover
                "dim must be defined.")
        BenchPerfTest.__init__(self, **opts)

        allowed = {"max_depth"}
        opts = {k: v for k, v in opts.items() if k in allowed}
        self.dtype = dtype
        self.skl = estimator(**opts)
        X, y = self._get_random_dataset(N_fit, dim)
        try:
            self.skl.fit(X, y)
        except Exception as e:  # pragma: no cover
            raise RuntimeError(
                "X.shape={}\nopts={}\nTraining failed for {}".format(
                    X.shape, opts, self.skl)) from e

        if dtype == numpy.float64:
            initial_types = [('X', DoubleTensorType([None, X.shape[1]]))]
        elif dtype == numpy.float32:
            initial_types = [('X', FloatTensorType([None, X.shape[1]]))]
        else:
            raise ValueError(  # pragma: no cover
                "Unable to convert the model into ONNX, unsupported dtype {}.".
                format(dtype))
        self.logconvert = StringIO()
        with contextlib.redirect_stdout(self.logconvert):
            with contextlib.redirect_stderr(self.logconvert):
                onx = to_onnx(self.skl,
                              initial_types=initial_types,
                              options=onnx_options,
                              target_opset=get_opset_number_from_onnx())
                onx.ir_version = get_ir_version_from_onnx()

        self._init(onx, runtimes)
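The constructor above only changes the declared input type of the exported graph according to dtype. A minimal standalone sketch of the same conversion pattern with plain skl2onnx (the estimator, data and feature count are hypothetical, chosen only for illustration):

import numpy
from sklearn.tree import DecisionTreeRegressor
from skl2onnx import to_onnx
from skl2onnx.common.data_types import DoubleTensorType, FloatTensorType

X = numpy.random.randn(100, 4)
model = DecisionTreeRegressor(max_depth=3).fit(X, X[:, 0])

dtype = numpy.float32  # or numpy.float64
tensor_type = DoubleTensorType if dtype == numpy.float64 else FloatTensorType
initial_types = [('X', tensor_type([None, X.shape[1]]))]
onx = to_onnx(model, initial_types=initial_types)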
Example n. 2
 def test_onnxt_runtime_add1(self):
     idi = numpy.identity(2, dtype=numpy.float32)
     onx = OnnxAdd('X', idi, output_names=['Y'],
                   op_version=get_opset_number_from_onnx())
     model_def = onx.to_onnx({'X': idi.astype(numpy.float32)})
     X = numpy.array([[1, 2], [3, 4]], dtype=numpy.float32)
     model_def.ir_version = get_ir_version_from_onnx()
     oinf = OnnxInference(model_def, runtime='onnxruntime1')
     got = oinf.run({'X': X})
     self.assertEqual(list(sorted(got)), ['Y'])
     self.assertEqualArray(idi + X, got['Y'], decimal=6)
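The same Add graph can be executed with the default pure-Python runtime as well; a minimal self-contained sketch, assuming the usual skl2onnx and mlprodict import paths:

import numpy
from skl2onnx.algebra.onnx_ops import OnnxAdd
from mlprodict.onnxrt import OnnxInference

idi = numpy.identity(2, dtype=numpy.float32)
onx = OnnxAdd('X', idi, output_names=['Y'])
model_def = onx.to_onnx({'X': idi})
oinf = OnnxInference(model_def)  # runtime='python' by default
X = numpy.array([[1, 2], [3, 4]], dtype=numpy.float32)
assert numpy.allclose(oinf.run({'X': X})['Y'], idi + X)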
Example n. 3
 def test_onnxt_runtime_array_feature_extractor_cmp(self):
     X = numpy.array([3.3626876, 2.204158, 2.267245, 1.297554, 0.97023404],
                     dtype=numpy.float32)
     indices = numpy.array([[
         4,
         2,
         0,
         1,
         3,
     ], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [3, 4, 2, 0, 1], [0, 2, 3, 4, 1]],
                           dtype=numpy.int64)
     onx = OnnxArrayFeatureExtractor('X', indices, output_names=['Y'])
     model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
                             outputs=[('Y', FloatTensorType([2]))])
     oinf = OnnxInference(model_def)
     got = oinf.run({'X': X})['Y']
     model_def.ir_version = get_ir_version_from_onnx()
     oinf2 = OnnxInference(model_def, runtime="onnxruntime2")
     got2 = oinf2.run({'X': X})['Y']
     self.assertEqualArray(got, got2)
Example n. 4
 def test_onnxt_runtime_array_feature_extractor_cmp4(self):
     X = numpy.random.randn(38, 5).astype(  # pylint: disable=E1101
         numpy.float32)  # pylint: disable=E1101
     indices = numpy.ones((38, 1), dtype=numpy.int64)
     onx = OnnxArrayFeatureExtractor('X', indices, output_names=['Y'])
     model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
                             outputs=[('Y', FloatTensorType([2]))])
     oinf = OnnxInference(model_def)
     got = oinf.run({'X': X})['Y']
     model_def.ir_version = get_ir_version_from_onnx()
     oinf2 = OnnxInference(model_def, runtime="onnxruntime2")
     got2 = oinf2.run({'X': X})['Y']
     self.assertEqualArray(got, got2)
Example n. 5
 def test_code_add_except(self):
     idi = numpy.identity(2, dtype=numpy.float32)
     onx = OnnxAdd('X',
                   idi,
                   output_names=['Y'],
                   op_version=get_opset_number_from_onnx())
     model_def = onx.to_onnx({'X': idi.astype(numpy.float32)})
     model_def.ir_version = get_ir_version_from_onnx()
     oinf = OnnxInference(model_def, runtime='onnxruntime1')
      try:
          oinf.to_python()
      except ValueError:
          # expected: to_python is not supported for this runtime
          pass
Example n. 6
 def test_kernel_exp_sine_squared(self):
     from skl2onnx.operator_converters.gaussian_process import convert_kernel
     ker = ExpSineSquared()
     onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=numpy.float32,
                          op_version=10)
     model_onnx = onx.to_onnx(
         inputs=[('X', FloatTensorType([None, None]))])
     model_onnx.ir_version = get_ir_version_from_onnx()
     sess = OnnxInference(model_onnx, runtime='onnxruntime1')
     Xtest_ = numpy.arange(6).reshape((3, 2))
     res = sess.run({'X': Xtest_.astype(numpy.float32)})
     m1 = res['Y']
     m2 = ker(Xtest_)
     self.assertEqualArray(m1, m2, decimal=5)
Example n. 7
 def test_kernel_rbf1(self):
     from skl2onnx.operator_converters.gaussian_process import convert_kernel
     ker = RBF(length_scale=1, length_scale_bounds=(1e-3, 1e3))
     onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=numpy.float32,
                          op_version=10)
     model_onnx = onx.to_onnx(
         inputs=[('X', FloatTensorType([None, None]))])
     model_onnx.ir_version = get_ir_version_from_onnx()
     sess = OnnxInference(model_onnx, runtime='onnxruntime1')
     Xtest_ = numpy.arange(6).reshape((3, 2))
     res = sess.run({'X': Xtest_.astype(numpy.float32)})
     m1 = res['Y']
     m2 = ker(Xtest_)
     self.assertEqualArray(m1, m2)
Example n. 8
    def test_model_bernoulli_nb_bc_onnxruntime1(self):
        model, X = self.fit_classification_model(BernoulliNB(), 2)
        model_onnx = convert_sklearn(
            model, "?", [("input", FloatTensorType([None, X.shape[1]]))],
            target_opset=get_opset_number_from_onnx())
        exp1 = model.predict(X)
        exp = model.predict_proba(X)

        model_onnx.ir_version = get_ir_version_from_onnx()
        oinf = _capture_output(
            lambda: OnnxInference(model_onnx, runtime='onnxruntime1'),
            'c')[0]
        got = oinf.run({'input': X})
        self.assertEqualArray(exp1, got['output_label'])
        got2 = DataFrame(got['output_probability']).values
        self.assertEqualArray(exp, got2, decimal=4)
Example n. 9
    def _create_onnx_inference(self, onx, runtime):
        if 'onnxruntime' in runtime:
            old = onx.ir_version
            onx.ir_version = get_ir_version_from_onnx()
        else:
            old = None

        try:
            res = OnnxInference(onx, runtime=runtime)
        except RuntimeError as e:  # pragma: no cover
            if "[ONNXRuntimeError]" in str(e):
                return RuntimeError("onnxruntime fails due to {}".format(
                    str(e)))
            raise e
        if old is not None:
            onx.ir_version = old
        return res
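A hypothetical caller of this helper could look like the sketch below; the point is that the helper returns a RuntimeError instance instead of raising when onnxruntime cannot load the model, so callers have to check the result:

    # sketch only: method name and runtime list are made up for illustration
    def _try_runtimes(self, onx, runtimes=('python', 'onnxruntime1')):
        engines = {}
        for rt in runtimes:
            res = self._create_onnx_inference(onx, rt)
            if isinstance(res, RuntimeError):
                continue  # onnxruntime failed to load the model
            engines[rt] = res
        return engines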
Example n. 10
    def test_rt_KNeighborsRegressor_onnxruntime(self):
        fLOG(__file__,
             self._testMethodName,
             OutputPrint=__name__ == "__main__")
        logger = getLogger('skl2onnx')
        logger.disabled = True

        iris = load_iris()
        X, y = iris.data, iris.target.astype(numpy.float32)
        X_train, X_test, y_train, _ = train_test_split(X, y)
        clr = KNeighborsRegressor()
        clr.fit(X_train, y_train)

        x2 = X_test.astype(numpy.float32)
        onx = to_onnx(clr, x2, rewrite_ops=True, target_opset=10)
        onx.ir_version = get_ir_version_from_onnx()
        pyrun = OnnxInference(onx, runtime="onnxruntime1")
        res = pyrun.run({'X': x2}, fLOG=print, verbose=1)
        self.assertIn('variable', res)
        self.assertEqual(res['variable'].shape, (38, 1))
Example n. 11
    def test_onnx_helper_load_save(self):
        model = make_pipeline(StandardScaler(), Binarizer(threshold=0.5))
        X = numpy.array([[0.1, 1.1], [0.2, 2.2]])
        model.fit(X)
        model_onnx = convert_sklearn(model, 'binarizer',
                                     [('input', FloatTensorType([None, 2]))])
        model_onnx.ir_version = get_ir_version_from_onnx()
        filename = "temp_onnx_helper_load_save.onnx"
        save_onnx_model(model_onnx, filename)
        model = load_onnx_model(filename)
        list(enumerate_model_node_outputs(model))
        new_model = select_model_inputs_outputs(model, 'variable')
        self.assertTrue(new_model.graph is not None)  # pylint: disable=E1101

        tr1 = self.get_model(model)
        tr2 = self.get_model(new_model)
        X = X.astype(numpy.float32)
        X1 = tr1(X)
        X2 = tr2(X)
        self.assertEqual(X1.shape, (2, 2))
        self.assertEqual(X2.shape, (2, 2))
Example n. 12
    def test_onnxrt_python_one_class_svm(self):
        X = numpy.array([[0, 1, 2], [44, 36, 18], [-4, -7, -5]],
                        dtype=numpy.float32)

        with self.subTest(dtype='float64'):
            for kernel in ['linear', 'sigmoid', 'rbf', 'poly']:
                model = OneClassSVM(kernel=kernel).fit(X)
                X64 = X.astype(numpy.float64)
                model_onnx = to_onnx(model, X64)
                model.decision_function(X64)
                self.assertIn("SVMRegressorDouble", str(model_onnx))
                oinf = OnnxInference(model_onnx, runtime='python')
                res = oinf.run({'X': X64})
                scores = res['scores']
                dec = model.decision_function(X64)
                self.assertEqualArray(scores, dec, decimal=5)
                # print("64", kernel + ("-" * (7 - len(kernel))), scores - dec, "skl", dec)

        with self.subTest(dtype='float32'):
            for kernel in ['linear', 'sigmoid', 'rbf', 'poly']:
                model = OneClassSVM(kernel=kernel).fit(X)
                X32 = X.astype(numpy.float32)
                model_onnx = to_onnx(model, X32)
                oinf = OnnxInference(model_onnx, runtime='python')
                res = oinf.run({'X': X32})
                scores = res['scores']
                dec = model.decision_function(X32)
                self.assertEqualArray(scores, dec, decimal=4)
                # print("32", kernel + ("-" * (7 - len(kernel))), scores - dec, "skl", dec)

                model_onnx.ir_version = get_ir_version_from_onnx()
                oinf = OnnxInference(model_onnx, runtime='onnxruntime1')
                res = oinf.run({'X': X32})
                scores = res['scores']
                dec = model.decision_function(X32)
                self.assertEqualArray(scores.ravel(), dec.ravel(), decimal=4)
Example n. 13
    def test_gradient_boosting_regressor_pipeline(self):
        fLOG(__file__, self._testMethodName, OutputPrint=__name__ == "__main__")

        random_seed = 123
        df = load_audit()
        train, test = train_test_split(df, test_size=0.2,
                                       random_state=random_seed)
        target_feature = 'TARGET_Adjusted'
        y_train = train[target_feature]
        x_train = train.drop(target_feature, axis=1, inplace=False)
        y_test = test[target_feature]
        x_test = test.drop(target_feature, axis=1, inplace=False)
        cols = list(x_train.columns)
        numerical_cols = list(
            x_train._get_numeric_data().columns)  # pylint: disable=W0212
        categorical_cols = list(set(cols) - set(numerical_cols))

        n_trees = 50
        max_depth = 10
        predictor = Pipeline([
            ('prep', ColumnTransformer([
                            ('num_prep', StandardScaler(), numerical_cols),
                            ('cat_prep', OneHotEncoder(
                                handle_unknown='ignore'), categorical_cols)
            ])),

            ('model', GradientBoostingClassifier(
                learning_rate=0.01,
                random_state=random_seed,
                n_estimators=n_trees,
                max_depth=max_depth))
        ])

        predictor.fit(x_train, y_train)
        fLOG('accuracy: ' + str(predictor.score(x_test, y_test)))
        sklearn_predictions = DataFrame(
            predictor.predict(x_test), columns=['sklearn_prediction'])

        def convert_dataframe_schema(df, drop=None):
            inputs = []
            for k, v in zip(df.columns, df.dtypes):
                if drop is not None and k in drop:
                    continue
                # ints are also treated as floats, otherwise the onnx exception "all columns must be equal" is raised.
                if v in ('int64', 'float64'):
                    t = FloatTensorType([None, 1])
                else:
                    t = StringTensorType([None, 1])
                inputs.append((k, t))
            return inputs
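        # Illustration only (hypothetical columns): a frame with one numeric
        # and one text column would be mapped to
        #     [('Age', FloatTensorType([None, 1])),
        #      ('Employment', StringTensorType([None, 1]))]
        # i.e. every kept column becomes a separate one-column ONNX input.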

        model_name = 'gbt_audit'
        inputs = convert_dataframe_schema(x_train)
        model_onnx = convert_sklearn(predictor, model_name, inputs)

        data = {col[0]: x_test[col[0]].values.reshape(x_test.shape[0], 1)
                for col in inputs}
        for col in numerical_cols:
            data[col] = data[col].astype(numpy.float32)

        for runtime in ['python', 'python_compiled',
                        'onnxruntime1', 'onnxruntime2']:
            if runtime == 'onnxruntime2':
                # Types for text columns are guessed wrong
                # (Float instead of text).
                continue

            if 'onnxruntime' in runtime:
                model_onnx.ir_version = get_ir_version_from_onnx()
            sess = OnnxInference(model_onnx, runtime=runtime)

            onnx_predictions = sess.run(data)
            onnx_predictions = DataFrame(
                {'onnx_prediction': onnx_predictions['output_label']})

            fLOG('Model accuracy in SKlearn = ' +
                 str(accuracy_score(y_test, sklearn_predictions.values)))
            fLOG('Model accuracy in ONNX = ' +
                 str(accuracy_score(y_test, onnx_predictions)))
            fLOG()
            fLOG('predicted class distribution from SKLearn')
            fLOG(sklearn_predictions['sklearn_prediction'].value_counts())
            fLOG()
            fLOG('predicted class distribution from ONNX')
            fLOG(onnx_predictions['onnx_prediction'].value_counts())
            fLOG()

            df = concat([sklearn_predictions, onnx_predictions], axis=1)
            df["diff"] = df["sklearn_prediction"] - df["onnx_prediction"]
            df["diff_abs"] = numpy.abs(df["diff"])
            total = df.sum()
            sum_diff = total["diff_abs"]
            if sum_diff != 0:
                raise AssertionError("Runtime: '{}', discrepencies: sum_diff={}"
                                     "".format(runtime, sum_diff))
Example n. 14
    def test_onnxrt_python_lightgbm_categorical_iris_dataframe(self):
        iris = load_iris()
        X, y = iris.data, iris.target
        X = (X * 10).astype(numpy.int32)
        X_train, X_test, y_train, _ = train_test_split(X, y, random_state=11)
        other_x = numpy.random.randint(0,
                                       high=10,
                                       size=(1500, X_train.shape[1]))
        X_train = numpy.vstack([X_train, other_x]).astype(dtype=numpy.int32)
        y_train = numpy.hstack([
            y_train,
            numpy.zeros(500) + 3,
            numpy.zeros(500) + 4,
            numpy.zeros(500) + 5
        ]).astype(dtype=numpy.int32)
        self.assertEqual(y_train.shape, (X_train.shape[0], ))
        y_train = y_train % 2

        df_train = pandas.DataFrame(X_train)
        df_train.columns = ['c1', 'c2', 'c3', 'c4']
        df_train['c1'] = df_train['c1'].astype('category')
        df_train['c2'] = df_train['c2'].astype('category')
        df_train['c3'] = df_train['c3'].astype('category')
        df_train['c4'] = df_train['c4'].astype('category')

        df_test = pandas.DataFrame(X_test)
        df_test.columns = ['c1', 'c2', 'c3', 'c4']
        df_test['c1'] = df_test['c1'].astype('category')
        df_test['c2'] = df_test['c2'].astype('category')
        df_test['c3'] = df_test['c3'].astype('category')
        df_test['c4'] = df_test['c4'].astype('category')

        # categorical_feature=[0, 1]
        train_data = Dataset(df_train, label=y_train)

        params = {
            "boosting_type": "gbdt",
            "learning_rate": 0.05,
            "n_estimators": 2,
            "objective": "binary",
            "max_bin": 5,
            "min_child_samples": 100,
            'verbose': -1,
        }

        booster = lgb_train(params, train_data)
        exp = booster.predict(X_test)

        onx = to_onnx(booster, df_train)
        self.assertIn('ZipMap', str(onx))

        oif = OnnxInference(onx)
        got = oif.run(df_test)
        values = pandas.DataFrame(got['output_probability']).values
        self.assertEqualArray(exp, values[:, 1], decimal=5)

        onx.ir_version = get_ir_version_from_onnx()
        oif = OnnxInference(onx, runtime='onnxruntime1')
        got = oif.run(df_test)
        values = pandas.DataFrame(got['output_probability']).values
        self.assertEqualArray(exp, values[:, 1], decimal=5)

        onx = to_onnx(booster,
                      df_train,
                      options={booster.__class__: {
                          'cast': True
                      }})
        self.assertIn('op_type: "Cast"', str(onx))
        oif = OnnxInference(onx)
        got = oif.run(df_test)
        values = pandas.DataFrame(got['output_probability']).values
        self.assertEqualArray(exp, values[:, 1], decimal=5)
Example n. 15
    def onnx_test_svm_single_classreg(self,
                                      dtype,
                                      n_targets=1,
                                      debug=False,
                                      add_noise=False,
                                      runtime='python',
                                      target_opset=None,
                                      kind='reg',
                                      level=1,
                                      **kwargs):
        iris = load_iris()
        X, y = iris.data, iris.target
        if add_noise:
            X += numpy.random.randn(X.shape[0], X.shape[1]) * 10
        if kind == 'reg':
            y = y.astype(dtype)
        elif kind == 'bin':
            y = (y % 2).astype(numpy.int64)
        elif kind == 'mcl':
            y = y.astype(numpy.int64)
        else:
            raise AssertionError("unknown '{}'".format(kind))

        if n_targets != 1:
            yn = numpy.empty((y.shape[0], n_targets), dtype=dtype)
            for i in range(n_targets):
                yn[:, i] = y + i
            y = yn
        X_train, X_test, y_train, _ = train_test_split(X, y, random_state=11)
        X_test = X_test.astype(dtype)
        if kind in ('bin', 'mcl'):
            clr = SVC(**kwargs)
        elif kind == 'reg':
            clr = SVR(**kwargs)
        clr.fit(X_train, y_train)

        model_def = to_onnx(clr,
                            X_train.astype(dtype),
                            rewrite_ops=True,
                            target_opset=target_opset)
        if 'onnxruntime' in runtime:
            model_def.ir_version = get_ir_version_from_onnx()
        try:
            oinf = OnnxInference(model_def, runtime=runtime)
        except RuntimeError as e:
            if debug:
                raise RuntimeError(
                    "Unable to create a model\n{}".format(model_def)) from e
            raise e

        if debug:
            y = oinf.run({'X': X_test}, verbose=level, fLOG=print)
        else:
            y = oinf.run({'X': X_test})

        lexp = clr.predict(X_test)
        if kind == 'reg':
            self.assertEqual(list(sorted(y)), ['variable'])
            if dtype == numpy.float32:
                self.assertEqualArray(lexp.ravel(),
                                      y['variable'].ravel(),
                                      decimal=5)
            else:
                self.assertEqualArray(lexp, y['variable'], decimal=5)
        else:
            self.assertEqual(list(sorted(y)),
                             ['output_label', 'output_probability'])
            self.assertEqualArray(lexp, y['output_label'])
            lprob = clr.predict_proba(X_test)
            self.assertEqualArray(lprob,
                                  DataFrame(y['output_probability']).values,
                                  decimal=5)
Example n. 16
    def onnx_test_knn_single_classreg(self,
                                      dtype,
                                      n_targets=1,
                                      debug=False,
                                      add_noise=False,
                                      runtime='python',
                                      target_opset=None,
                                      optim=None,
                                      kind='reg',
                                      level=1,
                                      largest0=True,
                                      metric_params=None,
                                      **kwargs):
        iris = load_iris()
        X, y = iris.data, iris.target
        if add_noise:
            X += numpy.random.randn(X.shape[0], X.shape[1]) * 10
        if kind == 'reg':
            y = y.astype(dtype)
        elif kind == 'bin':
            y = (y % 2).astype(numpy.int64)
        elif kind == 'mcl':
            y = y.astype(numpy.int64)
        else:
            raise AssertionError("unknown '{}'".format(kind))

        if n_targets != 1:
            yn = numpy.empty((y.shape[0], n_targets), dtype=dtype)
            for i in range(n_targets):
                yn[:, i] = y + i
            y = yn
        X_train, X_test, y_train, _ = train_test_split(X, y, random_state=11)
        X_test = X_test.astype(dtype)
        if kind in ('bin', 'mcl'):
            clr = KNeighborsClassifier(metric_params=metric_params, **kwargs)
        elif kind == 'reg':
            clr = KNeighborsRegressor(metric_params=metric_params, **kwargs)
        else:
            raise NotImplementedError(kind)
        clr.fit(X_train, y_train)

        if optim is None:
            options = None
        else:
            options = {clr.__class__: {'optim': 'cdist'}}
        if not largest0:
            if options is None:
                options = {}
            if clr.__class__ not in options:
                options[clr.__class__] = {}
            options[clr.__class__].update({'largest0': False})

        if target_opset is None:
            opsets = list(
                sorted(set([9, 10, 11, 12,
                            get_opset_number_from_onnx()])))
        else:
            opsets = [target_opset]
        for ops in opsets:
            if ops is None:
                raise AssertionError("Cannot happen: {}.".format(opsets))
            with self.subTest(target_opset=ops):
                try:
                    model_def = to_onnx(clr,
                                        X_train.astype(dtype),
                                        rewrite_ops=True,
                                        target_opset=ops,
                                        options=options)
                except NameError as e:
                    if "Option 'largest0' not in" in str(e):
                        continue
                    raise
                if 'onnxruntime' in runtime:
                    model_def.ir_version = get_ir_version_from_onnx()
                try:
                    if runtime == 'onnxruntime2':
                        oinf = _capture_output(
                            lambda: OnnxInference(model_def, runtime=runtime),  # pylint: disable=W0640
                            'c')[0]
                    else:
                        oinf = OnnxInference(model_def, runtime=runtime)
                except (RuntimeError, TypeError, OrtInvalidArgument) as e:
                    if "No Op registered for Identity with domain_version of 12" in str(
                            e):
                        continue
                    if debug:
                        raise AssertionError(
                            "Unable to create a model for target_opset={}\n----\n{}\n----"
                            .format(ops,
                                    str(model_def)[:100])) from e
                    if "Unknown model file format version." in str(e):
                        continue
                    raise AssertionError(
                        "Unable to create model for opset={} and runtime='{}'\n{}"
                        "".format(ops, runtime,
                                  str(model_def)[:100])) from e

                if debug:
                    y = oinf.run({'X': X_test}, verbose=level, fLOG=print)
                else:
                    y = oinf.run({'X': X_test})

                lexp = clr.predict(X_test)
                if kind == 'reg':
                    self.assertEqual(list(sorted(y)), ['variable'])
                    if dtype == numpy.float32:
                        self.assertEqualArray(lexp,
                                              y['variable'],
                                              decimal=5,
                                              squeeze=True)
                    else:
                        self.assertEqualArray(lexp,
                                              y['variable'],
                                              squeeze=True)
                else:
                    self.assertEqual(list(sorted(y)),
                                     ['output_label', 'output_probability'])
                    self.assertEqualArray(lexp, y['output_label'])
                    lprob = clr.predict_proba(X_test)
                    self.assertEqualArray(lprob,
                                          DataFrame(
                                              y['output_probability']).values,
                                          decimal=5)