Example #1
    def testFtrlWithL1_L2(self):
        for dtype in [dtypes.half, dtypes.float32]:
            with self.cached_session() as sess:
                var0 = variables.Variable([1.0, 2.0], dtype=dtype)
                var1 = variables.Variable([4.0, 3.0], dtype=dtype)
                grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
                grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)

                opt = ftrl.Ftrl(3.0,
                                initial_accumulator_value=0.1,
                                l1_regularization_strength=0.001,
                                l2_regularization_strength=2.0)
                update = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                variables.global_variables_initializer().run()

                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
                self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)

                # Run 10 steps FTRL
                for _ in range(10):
                    update.run()

                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllCloseAccordingToType(
                    np.array([-0.24059935, -0.46829352]), v0_val)
                self.assertAllCloseAccordingToType(
                    np.array([-0.02406147, -0.04830509]), v1_val)
Example #2
    def testFtrlWithL1_L2_L2ShrinkageSparse(self):
        """Tests the new FTRL op with support for l2 shrinkage on sparse grads."""
        for dtype in [dtypes.half, dtypes.float32]:
            with self.cached_session() as sess:
                var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
                var1 = variables.Variable([[4.0], [3.0]], dtype=dtype)
                grads0 = ops.IndexedSlices(
                    constant_op.constant([0.1], shape=[1, 1], dtype=dtype),
                    constant_op.constant([0]), constant_op.constant([2, 1]))
                grads1 = ops.IndexedSlices(
                    constant_op.constant([0.02], shape=[1, 1], dtype=dtype),
                    constant_op.constant([1]), constant_op.constant([2, 1]))

                opt = ftrl.Ftrl(3.0,
                                initial_accumulator_value=0.1,
                                l1_regularization_strength=0.001,
                                l2_regularization_strength=2.0,
                                l2_shrinkage_regularization_strength=0.1)
                update = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                variables.global_variables_initializer().run()

                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllCloseAccordingToType([[1.0], [2.0]], v0_val)
                self.assertAllCloseAccordingToType([[4.0], [3.0]], v1_val)

                # Run 10 steps FTRL
                for _ in range(10):
                    update.run()

                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllCloseAccordingToType([[-0.22578995], [2.]],
                                                   v0_val)
                self.assertAllCloseAccordingToType([[4.], [-0.13229476]],
                                                   v1_val)
Example #3
    def testFtrlwithoutRegularization2(self):
        for dtype in [dtypes.half, dtypes.float32]:
            with self.cached_session() as sess:
                var0 = variables.Variable([1.0, 2.0], dtype=dtype)
                var1 = variables.Variable([4.0, 3.0], dtype=dtype)
                grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
                grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)

                opt = ftrl.Ftrl(3.0,
                                initial_accumulator_value=0.1,
                                l1_regularization_strength=0.0,
                                l2_regularization_strength=0.0)
                update = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                variables.global_variables_initializer().run()

                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
                self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)

                # Run 3 steps FTRL
                for _ in range(3):
                    update.run()
                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllCloseAccordingToType(
                    np.array([-2.55607247, -3.98729396]), v0_val)
                self.assertAllCloseAccordingToType(
                    np.array([-0.28232238, -0.56096673]), v1_val)
Example #4
  def testFtrlWithL2_Beta(self):
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    for dtype in [dtypes.half, dtypes.float32]:
      with ops.Graph().as_default(), self.cached_session():
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([4.0, 3.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)

        opt = ftrl.Ftrl(
            3.0,
            initial_accumulator_value=0.1,
            l1_regularization_strength=0.0,
            l2_regularization_strength=0.1,
            beta=0.1)
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        self.evaluate(variables.global_variables_initializer())

        v0_val, v1_val = self.evaluate([var0, var1])
        self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
        self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)

        # Run 10 steps FTRL
        for _ in range(10):
          update.run()
        v0_val, v1_val = self.evaluate([var0, var1])
        self.assertAllCloseAccordingToType(
            np.array([-2.735487, -4.704625]), v0_val)
        self.assertAllCloseAccordingToType(
            np.array([-0.294335, -0.586556]), v1_val)
Example #5
    def testFtrlWithL1(self):
        # TODO(tanzheny, omalleyt): Fix test in eager mode.
        for dtype in [dtypes.half, dtypes.float32]:
            with ops.Graph().as_default(), self.cached_session(use_gpu=True):
                var0 = variables.Variable([1.0, 2.0], dtype=dtype)
                var1 = variables.Variable([4.0, 3.0], dtype=dtype)
                grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
                grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)

                opt = ftrl.Ftrl(3.0,
                                initial_accumulator_value=0.1,
                                l1_regularization_strength=0.001,
                                l2_regularization_strength=0.0)
                update = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                variables.global_variables_initializer().run()

                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
                self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)

                # Run 10 steps FTRL
                for _ in range(10):
                    update.run()
                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllCloseAccordingToType(
                    np.array([-7.66718769, -10.91273689]), v0_val)
                self.assertAllCloseAccordingToType(
                    np.array([-0.93460727, -1.86147261]), v1_val)
Example #6
  def testFtrlWithBeta(self):
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    for dtype in [dtypes.half, dtypes.float32]:
      with ops.Graph().as_default(), self.cached_session(use_gpu=True):
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([4.0, 3.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)

        opt = ftrl.Ftrl(3.0, initial_accumulator_value=0.1, beta=0.1)
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        self.evaluate(variables.global_variables_initializer())

        v0_val, v1_val = self.evaluate([var0, var1])
        self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
        self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)

        # Run 10 steps FTRL
        for _ in range(10):
          update.run()
        v0_val, v1_val = self.evaluate([var0, var1])
        self.assertAllCloseAccordingToType(
            np.array([-6.096838, -9.162214]), v0_val)
        self.assertAllCloseAccordingToType(
            np.array([-0.717741, -1.425132]), v1_val)
Example #7
    def doTestFtrlwithoutRegularization(self, use_resource=False):
        for dtype in [dtypes.half, dtypes.float32]:
            with self.cached_session() as sess:
                if use_resource:
                    var0 = resource_variable_ops.ResourceVariable([0.0, 0.0],
                                                                  dtype=dtype)
                    var1 = resource_variable_ops.ResourceVariable([0.0, 0.0],
                                                                  dtype=dtype)
                else:
                    var0 = variables.Variable([0.0, 0.0], dtype=dtype)
                    var1 = variables.Variable([0.0, 0.0], dtype=dtype)
                grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
                grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
                opt = ftrl.Ftrl(3.0,
                                initial_accumulator_value=0.1,
                                l1_regularization_strength=0.0,
                                l2_regularization_strength=0.0)
                update = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                variables.global_variables_initializer().run()

                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllClose([0.0, 0.0], v0_val)
                self.assertAllClose([0.0, 0.0], v1_val)

                # Run 3 steps FTRL
                for _ in range(3):
                    update.run()

                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllCloseAccordingToType(
                    np.array([-2.60260963, -4.29698515]), v0_val)
                self.assertAllCloseAccordingToType(
                    np.array([-0.28432083, -0.56694895]), v1_val)
Example #8
  def testFtrlWithL2ShrinkageDoesNotChangeLrSchedule(self):
    """Verifies that l2 shrinkage in FTRL does not change lr schedule."""
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    for dtype in [dtypes.half, dtypes.float32]:
      with ops.Graph().as_default(), self.cached_session() as sess:
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([1.0, 2.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
        grads1 = constant_op.constant([0.1, 0.2], dtype=dtype)

        opt0 = ftrl.Ftrl(
            3.0,
            initial_accumulator_value=0.1,
            l1_regularization_strength=0.001,
            l2_regularization_strength=2.0,
            l2_shrinkage_regularization_strength=0.1)
        opt1 = ftrl.Ftrl(
            3.0,
            initial_accumulator_value=0.1,
            l1_regularization_strength=0.001,
            l2_regularization_strength=2.0)
        update0 = opt0.apply_gradients([(grads0, var0)])
        update1 = opt1.apply_gradients([(grads1, var1)])
        self.evaluate(variables.global_variables_initializer())

        v0_val, v1_val = self.evaluate([var0, var1])
        self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
        self.assertAllCloseAccordingToType([1.0, 2.0], v1_val)

        # Run 10 steps FTRL
        for _ in range(10):
          update0.run()
          update1.run()

        v0_val, v1_val = self.evaluate([var0, var1])
        # var0 is experiencing L2 shrinkage so it should be smaller than var1
        # in magnitude.
        self.assertTrue((v0_val**2 < v1_val**2).all())
        accum0 = sess.run(opt0.get_slot(var0, "accumulator"))
        accum1 = sess.run(opt1.get_slot(var1, "accumulator"))
        # L2 shrinkage should not change how we update grad accumulator.
        self.assertAllCloseAccordingToType(accum0, accum1)
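The two comments above summarize the semantics being verified: with l2_shrinkage_regularization_strength set, the shrinkage term is folded into the gradient that drives the linear (z) term, while the accumulator keeps collecting the raw squared gradient, so the per-coordinate learning-rate schedule is untouched. A minimal NumPy sketch of one dense FTRL step under those documented semantics; the helper name and the scalar walk-through are illustrative, not the actual kernel:

import numpy as np

def ftrl_shrinkage_step(var, accum, linear, grad, lr=3.0, lr_power=-0.5,
                        l1=0.001, l2=2.0, l2_shrinkage=0.1):
    # Shrinkage enters only the gradient used for the linear (z) term.
    grad_shrink = grad + 2.0 * l2_shrinkage * var
    # The accumulator still collects the raw squared gradient, so the
    # learning-rate schedule is identical with or without shrinkage;
    # that is exactly what the accumulator assertion above checks.
    accum_new = accum + grad ** 2
    sigma = (accum_new ** -lr_power - accum ** -lr_power) / lr
    linear_new = linear + grad_shrink - sigma * var
    quadratic = accum_new ** -lr_power / lr + 2.0 * l2
    var_new = np.where(np.abs(linear_new) > l1,
                       (np.sign(linear_new) * l1 - linear_new) / quadratic,
                       0.0)
    return var_new, accum_new, linear_new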
Example #9
  def test_numpy_input_fn_with_optimizer_instance(self):
    """Tests complete flow with optimizer_v2 instance."""
    label_dimension = 2
    batch_size = 10
    train_input_fn, eval_input_fn, predict_input_fn = self._create_input_fn(
        label_dimension, batch_size)

    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=label_dimension,
        label_dimension=label_dimension,
        batch_size=batch_size,
        optimizer=ftrl_v2.Ftrl(0.01))  # Test with optimizer_v2 instance
Example #10
  def testMinimizeSparseResourceVariable(self):
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
        x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
        pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
        loss = pred * pred
        sgd_op = ftrl.Ftrl(1.0).minimize(loss, var_list=[var0])
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
        # Run 1 step of FTRL
        sgd_op.run()
        # Validate updated params
        self.assertAllCloseAccordingToType([[0, 1]],
                                           self.evaluate(var0),
                                           atol=0.01)
Example #11
    def testEquivGradientDescentwithoutRegularization(self):
        for dtype in [dtypes.half, dtypes.float32]:
            with self.cached_session():
                val0, val1 = self.applyOptimizer(
                    ftrl.Ftrl(
                        3.0,
                        # Fixed learning rate
                        learning_rate_power=-0.0,
                        initial_accumulator_value=0.1,
                        l1_regularization_strength=0.0,
                        l2_regularization_strength=0.0),
                    dtype)

            with self.cached_session():
                val2, val3 = self.applyOptimizer(
                    gradient_descent.GradientDescentOptimizer(3.0), dtype)

            self.assertAllCloseAccordingToType(val0, val2)
            self.assertAllCloseAccordingToType(val1, val3)
Example #12
    def testEquivGradientDescentwithoutRegularization(self):
        # TODO(tanzheny, omalleyt): Fix test in eager mode.
        for dtype in [dtypes.half, dtypes.float32]:
            with ops.Graph().as_default(), self.cached_session(use_gpu=True):
                val0, val1 = self.applyOptimizer(
                    ftrl.Ftrl(
                        3.0,
                        # Fixed learning rate
                        learning_rate_power=-0.0,
                        initial_accumulator_value=0.1,
                        l1_regularization_strength=0.0,
                        l2_regularization_strength=0.0),
                    dtype)

            with ops.Graph().as_default(), self.cached_session(use_gpu=True):
                val2, val3 = self.applyOptimizer(
                    gradient_descent.GradientDescentOptimizer(3.0), dtype)

            self.assertAllCloseAccordingToType(val0, val2)
            self.assertAllCloseAccordingToType(val1, val3)
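Both variants of this test rest on the same identity: with learning_rate_power=-0.0 and no regularization, the accumulator no longer influences the step size, the linear term is just the running gradient sum, and the recovered weight follows a fixed-learning-rate gradient-descent trajectory, provided the weights start at zero (FTRL recomputes the weight from the linear term, so a nonzero initial value would be discarded after the first step, while gradient descent keeps it). A small scalar sketch of that recurrence, assuming the standard FTRL-proximal update; the helper name and the constant-gradient walk-through are illustrative:

def ftrl_step(var, accum, linear, grad, lr=3.0, lr_power=-0.0):
    # FTRL without L1/L2: the weight is recomputed from the linear term.
    accum_new = accum + grad ** 2
    sigma = (accum_new ** -lr_power - accum ** -lr_power) / lr
    linear_new = linear + grad - sigma * var
    var_new = -lr * linear_new / accum_new ** -lr_power
    return var_new, accum_new, linear_new

ftrl_var, accum, linear = 0.0, 0.1, 0.0
sgd_var = 0.0
for _ in range(3):
    grad = 0.1
    ftrl_var, accum, linear = ftrl_step(ftrl_var, accum, linear, grad)
    sgd_var -= 3.0 * grad  # plain gradient descent with the same fixed rate

print(ftrl_var, sgd_var)   # both are approximately -0.9

Setting learning_rate_power=-0.5 in the same recurrence instead reproduces Adagrad's lr / sqrt(accumulator) schedule, which is what the Adagrad-equivalence test further below relies on.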
Example #13
  def testMinimizeSparseResourceVariable(self):
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with ops.Graph().as_default(), self.cached_session():
        var0 = variables.Variable([[1.0, 2.0]], dtype=dtype)
        x = constant_op.constant([[4.0], [5.0]], dtype=dtype)

        def loss():
          pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)  # pylint: disable=cell-var-from-loop
          return pred * pred

        sgd_op = ftrl.Ftrl(1.0).minimize(loss, var_list=[var0])
        self.evaluate(variables.global_variables_initializer())
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
        # Run 1 step of FTRL
        sgd_op.run()
        # Validate updated params
        self.assertAllCloseAccordingToType([[0, 1]],
                                           self.evaluate(var0),
                                           atol=0.01)
Example #14
    def testEquivAdagradwithoutRegularization(self):
        for dtype in [dtypes.half, dtypes.float32]:
            with self.cached_session(use_gpu=True):
                val0, val1 = self.applyOptimizer(
                    ftrl.Ftrl(
                        3.0,
                        # Adagrad learning rate
                        learning_rate_power=-0.5,
                        initial_accumulator_value=0.1,
                        l1_regularization_strength=0.0,
                        l2_regularization_strength=0.0),
                    dtype)

            with self.cached_session(use_gpu=True):
                val2, val3 = self.applyOptimizer(
                    adagrad.AdagradOptimizer(3.0,
                                             initial_accumulator_value=0.1),
                    dtype)

            self.assertAllCloseAccordingToType(val0, val2)
            self.assertAllCloseAccordingToType(val1, val3)
Example #15
    def testFtrlWithL1_L2_L2Shrinkage(self):
        """Test the new FTRL op with support for l2 shrinkage.

        The addition of this parameter, which places a constant pressure on
        weights towards the origin, causes the gradient descent trajectory to
        differ. The weights will tend to have smaller magnitudes with this
        parameter set.
        """
        # TODO(tanzheny, omalleyt): Fix test in eager mode.
        for dtype in [dtypes.half, dtypes.float32]:
            with ops.Graph().as_default(), self.cached_session(use_gpu=True):
                var0 = variables.Variable([1.0, 2.0], dtype=dtype)
                var1 = variables.Variable([4.0, 3.0], dtype=dtype)
                grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
                grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)

                opt = ftrl.Ftrl(3.0,
                                initial_accumulator_value=0.1,
                                l1_regularization_strength=0.001,
                                l2_regularization_strength=2.0,
                                l2_shrinkage_regularization_strength=0.1)
                update = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                variables.global_variables_initializer().run()

                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
                self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)

                # Run 10 steps FTRL
                for _ in range(10):
                    update.run()

                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllCloseAccordingToType(
                    np.array([-0.22578995, -0.44345796]), v0_val)
                self.assertAllCloseAccordingToType(
                    np.array([-0.14378493, -0.13229476]), v1_val)
Example #16
# TODO(shiningsun): consider adding the other v1 optimizers
optimizers_v1 = [gradient_descent_optimizer_v1_fn, adagrad_optimizer_v1_fn]

adadelta_optimizer_keras_v2_fn = combinations.NamedObject(
    "AdadeltaKerasV2", lambda: adadelta_keras_v2.Adadelta(0.001))
adagrad_optimizer_keras_v2_fn = combinations.NamedObject(
    "AdagradKerasV2", lambda: adagrad_keras_v2.Adagrad(0.001))
adam_optimizer_keras_v2_fn = combinations.NamedObject(
    "AdamKerasV2", lambda: adam_keras_v2.Adam(0.001, epsilon=1.0))
adamax_optimizer_keras_v2_fn = combinations.NamedObject(
    "AdamaxKerasV2", lambda: adamax_keras_v2.Adamax(0.001, epsilon=1.0))
nadam_optimizer_keras_v2_fn = combinations.NamedObject(
    "NadamKerasV2", lambda: nadam_keras_v2.Nadam(0.001, epsilon=1.0))
ftrl_optimizer_keras_v2_fn = combinations.NamedObject(
    "FtrlKerasV2", lambda: ftrl_keras_v2.Ftrl(0.001))
gradient_descent_optimizer_keras_v2_fn = combinations.NamedObject(
    "GradientDescentKerasV2", lambda: gradient_descent_keras_v2.SGD(0.2))
rmsprop_optimizer_keras_v2_fn = combinations.NamedObject(
    "RmsPropKerasV2", lambda: rmsprop_keras_v2.RMSprop(0.001))

# TODO(shiningsun): consider adding the other v2 optimizers
optimizers_v2 = [
    gradient_descent_optimizer_keras_v2_fn, adagrad_optimizer_keras_v2_fn
]

optimizers_v1_and_v2 = optimizers_v1 + optimizers_v2

graph_and_eager_modes = ["graph", "eager"]

Example #17
HP_IN_GRAPH = {
    'Adam': ['decay', 'learning_rate'],
    'Ftrl': [
        'decay', 'l1_regularization_strength', 'l2_regularization_strength',
        'learning_rate', 'learning_rate_power'
    ],
    'RMSProp': ['decay', 'learning_rate', 'momentum', 'rho'],
    'Adagrad': ['decay', 'learning_rate'],
    'SGD': ['decay', 'learning_rate', 'momentum'],
}

# Optimizer v2 instances, keyed by optimizer name.
OPT_V2_INSTANCE = {
    'Adagrad': adagrad.Adagrad(),
    'Adam': adam.Adam(),
    'Ftrl': ftrl.Ftrl(),
    'RMSProp': rmsprop.RMSprop(),
    'SGD': gradient_descent.SGD(),
}


def _add_new_variable(initial_value, var_name_v2, var_name_v1, var_map,
                      var_names_map):
    """Creates a new variable and add it to the variable maps."""
    var = tf.Variable(initial_value, name=var_name_v2)
    var_map[var_name_v2] = var
    var_names_map[var_name_v2] = var_name_v1


def _add_opt_variable(opt_name_v2, var_name_v1, idx, suffix_v2, reader,
                      var_map, var_names_map):