Code example #1
    def test_learning_rate_sgd_regressor_optimal(self):
        cllr = LearningRateSGD(learning_rate='optimal')
        val = list(cllr.loop())
        self.assertEqual(len(val), 1000)
        self.is_decreased(val)
        self.assertEqual(val[0], 0.01)
        self.assertGreater(val[-1], 0.009)
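
The same iterator can be exercised outside the test class. A minimal sketch, assuming only the behaviour asserted here and in code example #9 (loop() yields one rate per iteration, 1000 by default; 'optimal' decays slowly from 0.01 while 'constant' stays flat):

    from onnxcustom.training.sgd_learning_rate import LearningRateSGD

    for schedule in ('optimal', 'constant'):
        lr = LearningRateSGD(learning_rate=schedule)
        rates = list(lr.loop())  # one learning rate per iteration
        print(schedule, len(rates), rates[0], rates[-1])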
Code example #2
    def wtest_ort_gradient_optimizers_grid_cls(self, use_weight=False):
        from onnxcustom.training.optimizers_partial import (
            OrtGradientForwardBackwardOptimizer)
        from onnxcustom.training.sgd_learning_rate import (LearningRateSGD)
        from onnxcustom.training.sgd_learning_loss import NegLogLearningLoss
        values = [
            1e-7, 1e-6, 5e-6, 1e-5, 5e-5, 1e-4, 5e-4, 1e-3, 5e-3, 1e-2, 1e-1,
            1, 10, 100, 1000
        ]
        X = numpy.random.randn(30, 3).astype(numpy.float32)
        y = (X.sum(axis=1) >= 0).astype(numpy.int64).reshape((-1, 1))
        X += numpy.random.randn(30, 3).astype(numpy.float32) / 10
        X_train, _, y_train, __ = train_test_split(X, y)
        scorer = make_scorer(lambda y_true, y_pred:
                             (-log_loss(y_true, y_pred)))  # pylint: disable=E1130
        reg = GridSearchCV(SGDClassifier(max_iter=20),
                           param_grid={'eta0': values},
                           scoring=scorer,
                           cv=3)
        reg.fit(X_train, y_train.ravel())
        self.assertIsInstance(reg.best_params_, dict)
        self.assertIn('eta0', reg.best_params_)
        onx = to_onnx(reg,
                      X_train,
                      target_opset=opset,
                      black_op={'LinearClassifier'},
                      options={'zipmap': False})
        onx = select_model_inputs_outputs(onx, outputs=['score'])
        onx = onnx_rename_weights(onx)
        inits = ['I0_coef', 'I1_intercept']

        cvalues = [LearningRateSGD(v) for v in values]
        grid = GridSearchCV(OrtGradientForwardBackwardOptimizer(
            onx,
            inits,
            weight_name='weight' if use_weight else None,
            learning_rate=LearningRateSGD(1e-4),
            learning_loss=NegLogLearningLoss(),
            warm_start=False,
            max_iter=20,
            batch_size=10,
            enable_logging=False,
            exc=False),
                            param_grid={'learning_rate': cvalues},
                            cv=3)
        # Both branches were identical: no sample weights are passed to
        # GridSearchCV here, even when use_weight is True.
        grid.fit(X_train, y_train)
        self.assertIsInstance(grid.best_params_, dict)
        self.assertEqual(len(grid.best_params_), 1)
        self.assertIsInstance(grid.best_params_['learning_rate'],
                              LearningRateSGD)
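
A short follow-up sketch, using only standard GridSearchCV attributes: once the search above has finished, the winning schedule and its cross-validated score can be read back.

    best_lr = grid.best_params_['learning_rate']  # a LearningRateSGD instance
    print(best_lr, grid.best_score_)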
Code example #3
    def wtest_ort_gradient_optimizers_grid_reg(self, use_weight=False):
        from onnxcustom.training.optimizers_partial import (
            OrtGradientForwardBackwardOptimizer)
        from onnxcustom.training.sgd_learning_rate import (LearningRateSGD)
        from onnxcustom.training.sgd_learning_loss import SquareLearningLoss
        values = [
            1e-6, 1e-5, 5e-5, 8e-5, 1e-4, 2e-4, 5e-4, 1e-3, 1e-2, 1e-1, 1
        ]
        X = numpy.random.randn(30, 3).astype(numpy.float32)
        y = X.sum(axis=1).reshape((-1, 1))
        y += numpy.random.randn(y.shape[0]).astype(numpy.float32).reshape(
            (-1, 1)) / 10
        X_train, _, y_train, __ = train_test_split(X, y)
        scorer = make_scorer(lambda y_true, y_pred:
                             (-mean_squared_error(y_true, y_pred)))  # pylint: disable=E1130
        reg = GridSearchCV(SGDRegressor(max_iter=20),
                           param_grid={'eta0': values},
                           scoring=scorer,
                           cv=3,
                           error_score='raise')
        reg.fit(X_train, y_train.ravel())
        self.assertIsInstance(reg.best_params_, dict)
        self.assertIn('eta0', reg.best_params_)
        onx = to_onnx(reg,
                      X_train,
                      target_opset=opset,
                      black_op={'LinearRegressor'})
        onx = onnx_rename_weights(onx)
        inits = ['I0_coef', 'I1_intercept']

        cvalues = [LearningRateSGD(v) for v in values]
        grid = GridSearchCV(OrtGradientForwardBackwardOptimizer(
            onx,
            inits,
            weight_name='weight' if use_weight else None,
            learning_rate=LearningRateSGD(1e-4),
            learning_loss=SquareLearningLoss(),
            warm_start=False,
            max_iter=20,
            batch_size=10,
            enable_logging=False,
            exc=False),
                            param_grid={'learning_rate': cvalues},
                            cv=3)
        # As in the classification test, both branches were identical:
        # GridSearchCV receives no sample weights here.
        grid.fit(X_train, y_train)
        self.assertIsInstance(grid.best_params_, dict)
        self.assertEqual(len(grid.best_params_), 1)
        self.assertIsInstance(grid.best_params_['learning_rate'],
                              LearningRateSGD)
Code example #4
    def test_ort_gradient_optimizers_optimal_use_ort(self):
        from onnxcustom.utils.orttraining_helper import add_loss_output
        from onnxcustom.training.optimizers import OrtGradientOptimizer
        X, y = make_regression(  # pylint: disable=W0632
            100,
            n_features=10,
            bias=2,
            random_state=0)
        X = X.astype(numpy.float32)
        y = y.astype(numpy.float32)
        X_train, _, y_train, __ = train_test_split(X, y)
        reg = LinearRegression()
        reg.fit(X_train, y_train)
        reg.coef_ = reg.coef_.reshape((1, -1))
        onx = to_onnx(reg,
                      X_train,
                      target_opset=opset,
                      black_op={'LinearRegressor'})
        onx_loss = add_loss_output(onx)
        inits = ['intercept', 'coef']
        train_session = OrtGradientOptimizer(
            onx_loss,
            inits,
            max_iter=10,
            learning_rate=LearningRateSGD(learning_rate='optimal'))
        self.assertRaise(lambda: train_session.get_state(), AttributeError)
        train_session.fit(X_train, y_train, use_numpy=False)
        state_tensors = train_session.get_state()
        self.assertEqual(len(state_tensors), 2)
        r = repr(train_session)
        self.assertIn("OrtGradientOptimizer(model_onnx=", r)
        self.assertIn("learning_rate='optimal'", r)
        losses = train_session.train_losses_
        self.assertGreater(len(losses), 1)
        self.assertFalse(any(map(numpy.isnan, losses)))
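
Since train_losses_ is a plain sequence of floats (the test maps numpy.isnan over it), the training curve can be plotted directly. A minimal sketch continuing the example above, assuming matplotlib is available:

    import matplotlib.pyplot as plt

    plt.plot(train_session.train_losses_)
    plt.xlabel('iteration')
    plt.ylabel('training loss')
    plt.show()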
Code example #5
    def wtest_ort_gradient_optimizers_fw_sgd_binary(self, use_weight):
        from onnxcustom.training.optimizers_partial import (
            OrtGradientForwardBackwardOptimizer)
        from onnxcustom.training.sgd_learning_rate import (LearningRateSGD)
        from onnxcustom.training.sgd_learning_loss import NegLogLearningLoss
        X = numpy.arange(60).astype(numpy.float32).reshape((-1, 3))
        y = numpy.arange(X.shape[0]).astype(numpy.float32).reshape(
            (-1, 1)) > 10
        X = X.astype(numpy.float32)
        y = y.astype(numpy.int64)
        y[0, 0] = 0
        y[-1, 0] = 1
        w = (numpy.random.rand(y.shape[0]) + 1).astype(numpy.float32)
        X_train, _, y_train, __, w_train, ___ = train_test_split(X, y, w)
        reg = SGDClassifier(loss='log_loss')  # named 'log' before scikit-learn 1.1
        if use_weight:
            reg.fit(X_train,
                    y_train.ravel(),
                    sample_weight=w_train.astype(numpy.float64))
        else:
            reg.fit(X_train, y_train.ravel())
        onx = to_onnx(reg,
                      X_train,
                      target_opset=opset,
                      black_op={'LinearRegressor'},
                      options={
                          'zipmap': False,
                          'raw_scores': True
                      })
        onx = select_model_inputs_outputs(onx, outputs=['score'])
        self.assertIn("output: name='score'", onnx_simple_text_plot(onx))
        inits = ['coef', 'intercept']

        train_session = OrtGradientForwardBackwardOptimizer(
            onx,
            inits,
            weight_name='weight' if use_weight else None,
            learning_rate=LearningRateSGD(1e10),
            learning_loss=NegLogLearningLoss(),
            warm_start=False,
            max_iter=100,
            batch_size=10,
            enable_logging=False)
        self.assertIsInstance(train_session.learning_loss, NegLogLearningLoss)
        self.assertEqual(train_session.learning_loss.eps, 1e-5)
        y_train = y_train.reshape((-1, 1))
        if use_weight:
            train_session.fit(X_train, y_train, w_train.reshape((-1, 1)))
        else:
            train_session.fit(X_train, y_train)
        losses = train_session.train_losses_
        self.assertGreater(len(losses), 1)
        if any(map(numpy.isnan, losses)):
            raise AssertionError(losses)
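
A side note on NegLogLearningLoss: the eps attribute checked above (1e-5 by default) presumably keeps the logarithm away from zero probabilities; that reading is an assumption based on the name, not something the test verifies.

    from onnxcustom.training.sgd_learning_loss import NegLogLearningLoss

    loss = NegLogLearningLoss()
    print(loss.eps)  # 1e-5, the default asserted by the test above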
Code example #6
    def wtest_ort_gradient_optimizers_fw_sgd_reg(self, use_weight):
        from onnxcustom.training.optimizers_partial import (
            OrtGradientForwardBackwardOptimizer)
        from onnxcustom.training.sgd_learning_rate import (LearningRateSGD)
        from onnxcustom.training.sgd_learning_loss import SquareLearningLoss
        from onnxcustom.training.excs import ConvergenceError
        X = numpy.arange(60).astype(numpy.float32).reshape((-1, 3))
        y = numpy.arange(X.shape[0]).astype(numpy.float32).reshape((-1, 1))
        y[0, 0] += 1
        y[-1, 0] += 1
        w = (numpy.random.rand(y.shape[0]) + 1).astype(numpy.float32)
        X_train, _, y_train, __, w_train, ___ = train_test_split(X, y, w)
        reg = SGDRegressor()
        if use_weight:
            reg.fit(X_train,
                    y_train.ravel(),
                    sample_weight=w_train.astype(numpy.float64))
        else:
            reg.fit(X_train, y_train.ravel())
        onx = to_onnx(reg,
                      X_train,
                      target_opset=opset,
                      black_op={'LinearRegressor'})
        inits = ['coef', 'intercept']

        train_session = OrtGradientForwardBackwardOptimizer(
            onx,
            inits,
            weight_name='weight' if use_weight else None,
            learning_rate=LearningRateSGD(1e10),
            learning_loss=SquareLearningLoss(),
            warm_start=False,
            max_iter=100,
            batch_size=10,
            enable_logging=False)
        self.assertIsInstance(train_session.learning_loss, SquareLearningLoss)
        y_train = y_train.reshape((-1, 1))
        if use_weight:
            self.assertRaise(
                lambda: train_session.fit(X_train, y_train,
                                          w_train.reshape((-1, 1))),
                ConvergenceError)
        else:
            self.assertRaise(lambda: train_session.fit(X_train, y_train),
                             ConvergenceError)
        losses = train_session.train_losses_
        self.assertLess(len(losses), 2)
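
The deliberately huge LearningRateSGD(1e10) makes the squared loss diverge, which is exactly what the test expects. A minimal sketch of handling that failure mode outside a test, reusing the ConvergenceError imported above:

    try:
        train_session.fit(X_train, y_train)
    except ConvergenceError as e:
        print('diverged as expected:', e)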
Code example #7
    def wtest_ort_gradient_optimizers_score_reg(self, use_weight=False):
        from onnxcustom.training.optimizers_partial import (
            OrtGradientForwardBackwardOptimizer)
        from onnxcustom.training.sgd_learning_rate import (LearningRateSGD)
        from onnxcustom.training.sgd_learning_loss import SquareLearningLoss
        X = numpy.arange(60).astype(numpy.float32).reshape((-1, 3))
        y = numpy.arange(X.shape[0]).astype(numpy.float32).reshape((-1, 1))
        y[0, 0] += 1
        y[-1, 0] += 1
        w = (numpy.random.rand(X.shape[0]) + 1).astype(numpy.float32)
        X_train, _, y_train, __, w_train, ___ = train_test_split(X, y, w)
        reg = SGDRegressor(max_iter=20)
        reg.fit(X_train, y_train.ravel())
        onx = to_onnx(reg,
                      X_train,
                      target_opset=opset,
                      black_op={'LinearRegressor'})
        onx = onnx_rename_weights(onx)
        inits = ['I0_coef', 'I1_intercept']

        model = OrtGradientForwardBackwardOptimizer(
            onx,
            inits,
            weight_name='weight' if use_weight else None,
            learning_rate=LearningRateSGD(1e-4),
            learning_loss=SquareLearningLoss(),
            warm_start=False,
            max_iter=20,
            batch_size=10)
        if use_weight:
            model.fit(X_train, y_train, w_train)
            losses = model.losses(X_train, y_train, w_train)
            score = model.score(X_train, y_train, w_train)
        else:
            model.fit(X_train, y_train)
            losses = model.losses(X_train, y_train)
            score = model.score(X_train, y_train)
        self.assertEqual(losses.shape[0], y_train.shape[0])
        self.assertFalse(any(map(numpy.isnan, losses)))
        self.assertIsInstance(score, numbers.Number)
        params = model.get_params()
        self.assertIsInstance(params['device'], str)
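
A recap of the contract these assertions establish: losses returns one value per observation while score reduces them to a single number, which is the method GridSearchCV relies on in code examples #2 and #3.

    print(model.losses(X_train, y_train).shape)  # (n_train_samples,)
    print(model.score(X_train, y_train))         # a single float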
Code example #8
    def test_learning_rate_sgd_exc(self):
        self.assertRaise(lambda: LearningRateSGD(learning_rate='CST'),
                         ValueError)
Code example #9
    def test_learning_rate_sgd_regressor_constant(self):
        cllr = LearningRateSGD(learning_rate='constant')
        val = list(cllr.loop())
        self.assertEqual(len(val), 1000)
        self.assertEqual(val[0], 0.01)
        self.assertEqual(val[-1], val[0])
Code example #10
    def test_learning_rate_sgd_regressor_exc(self):
        self.assertRaise(lambda: LearningRateSGD(learning_rate='EXC'),
                         ValueError)