Example #1
    def doTestFtrlwithoutRegularization(self, use_resource=False):
        # TODO(tanzheny, omalleyt): Fix test in eager mode.
        for dtype in [tf.float32]:
            with tf.Graph().as_default(), self.cached_session(use_gpu=True):
                # In TF2, tf.Variable always creates a resource variable, so
                # use_resource makes no difference here.
                var0 = tf.Variable([0.0, 0.0], dtype=dtype)
                var1 = tf.Variable([0.0, 0.0], dtype=dtype)
                grads0 = tf.constant([0.1, 0.2], dtype=dtype)
                grads1 = tf.constant([0.01, 0.02], dtype=dtype)
                opt = ftrl.Ftrl(3.0,
                                initial_accumulator_value=0.1,
                                l1_regularization_strength=0.0,
                                l2_regularization_strength=0.0)
                update = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                self.evaluate(tf.compat.v1.global_variables_initializer())

                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllClose([0.0, 0.0], v0_val)
                self.assertAllClose([0.0, 0.0], v1_val)

                # Run 3 steps of FTRL
                for _ in range(3):
                    update.run()

                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllCloseAccordingToType(
                    np.array([-2.60260963, -4.29698515]), v0_val)
                self.assertAllCloseAccordingToType(
                    np.array([-0.28432083, -0.56694895]), v1_val)
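
The expected values above follow from the closed-form FTRL-Proximal update. A minimal per-coordinate sketch, assuming the default learning_rate_power=-0.5 and no L1/L2 regularization (a simplification, not the library implementation):

import numpy as np

def ftrl_step(w, z, n, g, lr=3.0):
    """One FTRL-Proximal step for a single weight with l1 = l2 = 0 (sketch)."""
    n_new = n + g * g                        # gradient-squared accumulator
    sigma = (np.sqrt(n_new) - np.sqrt(n)) / lr
    z_new = z + g - sigma * w                # linear term
    w_new = -lr * z_new / np.sqrt(n_new)     # closed form when l1 = l2 = 0
    return w_new, z_new, n_new

# Three steps from w = 0 with g = 0.1 and initial_accumulator_value = 0.1
w, z, n = 0.0, 0.0, 0.1
for _ in range(3):
    w, z, n = ftrl_step(w, z, n, g=0.1)
print(w)  # ~ -2.6026, matching the first element of v0_val asserted above
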
Example #2
    def testFtrlwithoutRegularization2(self):
        # TODO(tanzheny, omalleyt): Fix test in eager mode.
        for dtype in [tf.half, tf.float32]:
            with tf.Graph().as_default(), self.cached_session(use_gpu=True):
                var0 = tf.Variable([1.0, 2.0], dtype=dtype)
                var1 = tf.Variable([4.0, 3.0], dtype=dtype)
                grads0 = tf.constant([0.1, 0.2], dtype=dtype)
                grads1 = tf.constant([0.01, 0.02], dtype=dtype)

                opt = ftrl.Ftrl(3.0,
                                initial_accumulator_value=0.1,
                                l1_regularization_strength=0.0,
                                l2_regularization_strength=0.0)
                update = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                self.evaluate(tf.compat.v1.global_variables_initializer())

                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
                self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)

                # Run 3 steps of FTRL
                for _ in range(3):
                    update.run()
                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllCloseAccordingToType(
                    np.array([-2.55607247, -3.98729396]), v0_val)
                self.assertAllCloseAccordingToType(
                    np.array([-0.28232238, -0.56096673]), v1_val)
Example #3
    def testFtrlWithBeta(self):
        # TODO(tanzheny, omalleyt): Fix test in eager mode.
        for dtype in [tf.half, tf.float32]:
            with tf.Graph().as_default(), self.cached_session(use_gpu=True):
                var0 = tf.Variable([1.0, 2.0], dtype=dtype)
                var1 = tf.Variable([4.0, 3.0], dtype=dtype)
                grads0 = tf.constant([0.1, 0.2], dtype=dtype)
                grads1 = tf.constant([0.01, 0.02], dtype=dtype)

                opt = ftrl.Ftrl(3.0, initial_accumulator_value=0.1, beta=0.1)
                update = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                self.evaluate(tf.compat.v1.global_variables_initializer())

                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
                self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)

                # Run 10 steps of FTRL
                for _ in range(10):
                    update.run()
                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllCloseAccordingToType(
                    np.array([-6.096838, -9.162214]), v0_val)
                self.assertAllCloseAccordingToType(
                    np.array([-0.717741, -1.425132]), v1_val)
Example #4
    def testFtrlWithL1_L2(self):
        # TODO(tanzheny, omalleyt): Fix test in eager mode.
        for dtype in [tf.half, tf.float32]:
            with tf.Graph().as_default(), self.cached_session():
                var0 = tf.Variable([1.0, 2.0], dtype=dtype)
                var1 = tf.Variable([4.0, 3.0], dtype=dtype)
                grads0 = tf.constant([0.1, 0.2], dtype=dtype)
                grads1 = tf.constant([0.01, 0.02], dtype=dtype)

                opt = ftrl.Ftrl(3.0,
                                initial_accumulator_value=0.1,
                                l1_regularization_strength=0.001,
                                l2_regularization_strength=2.0)
                update = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                self.evaluate(tf.compat.v1.global_variables_initializer())

                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
                self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)

                # Run 10 steps of FTRL
                for _ in range(10):
                    update.run()

                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllCloseAccordingToType(
                    np.array([-0.24059935, -0.46829352]), v0_val)
                self.assertAllCloseAccordingToType(
                    np.array([-0.02406147, -0.04830509]), v1_val)
Example #5
    def testFtrlWithL2ShrinkageDoesNotChangeLrSchedule(self):
        """Verifies that l2 shrinkage in FTRL does not change lr schedule."""
        # TODO(tanzheny, omalleyt): Fix test in eager mode.
        for dtype in [tf.half, tf.float32]:
            with tf.Graph().as_default(), self.cached_session(
                    use_gpu=True) as sess:
                var0 = tf.Variable([1.0, 2.0], dtype=dtype)
                var1 = tf.Variable([1.0, 2.0], dtype=dtype)
                grads0 = tf.constant([0.1, 0.2], dtype=dtype)
                grads1 = tf.constant([0.1, 0.2], dtype=dtype)

                opt0 = ftrl.Ftrl(3.0,
                                 initial_accumulator_value=0.1,
                                 l1_regularization_strength=0.001,
                                 l2_regularization_strength=2.0,
                                 l2_shrinkage_regularization_strength=0.1)
                opt1 = ftrl.Ftrl(3.0,
                                 initial_accumulator_value=0.1,
                                 l1_regularization_strength=0.001,
                                 l2_regularization_strength=2.0)
                update0 = opt0.apply_gradients([(grads0, var0)])
                update1 = opt1.apply_gradients([(grads1, var1)])
                self.evaluate(tf.compat.v1.global_variables_initializer())

                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
                self.assertAllCloseAccordingToType([1.0, 2.0], v1_val)

                # Run 10 steps of FTRL
                for _ in range(10):
                    update0.run()
                    update1.run()

                v0_val, v1_val = self.evaluate([var0, var1])
                # var0 is experiencing L2 shrinkage so it should be smaller than var1
                # in magnitude.
                self.assertTrue((v0_val**2 < v1_val**2).all())
                accum0 = sess.run(opt0.get_slot(var0, "accumulator"))
                accum1 = sess.run(opt1.get_slot(var1, "accumulator"))
                # L2 shrinkage should not change how we update grad accumulator.
                self.assertAllCloseAccordingToType(accum0, accum1)
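
The two comments above pin down how l2 shrinkage is commonly folded into FTRL: it only alters the gradient used for the linear term, while the accumulator (and therefore the learning-rate schedule) still sees the raw gradient. A rough sketch of that split, based on the assertions rather than on the kernel source:

def shrinkage_gradients(g, w, l2_shrinkage):
    """Sketch (assumption): gradient split used when l2 shrinkage is enabled."""
    g_for_linear = g + 2.0 * l2_shrinkage * w  # constant pull towards the origin
    g_for_accum = g                            # raw gradient: accumulator and lr
                                               # schedule are unchanged
    return g_for_linear, g_for_accum
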
Example #6
    def testEquivGradientDescentwithoutRegularization(self):
        # TODO(tanzheny, omalleyt): Fix test in eager mode.
        for dtype in [tf.half, tf.float32]:
            with tf.Graph().as_default(), self.cached_session(use_gpu=True):
                val0, val1 = self.applyOptimizer(
                    ftrl.Ftrl(
                        3.0,
                        # Fixed learning rate
                        learning_rate_power=-0.0,
                        initial_accumulator_value=0.1,
                        l1_regularization_strength=0.0,
                        l2_regularization_strength=0.0),
                    dtype)

            with tf.Graph().as_default(), self.cached_session(use_gpu=True):
                val2, val3 = self.applyOptimizer(
                    tf.compat.v1.train.GradientDescentOptimizer(3.0), dtype)

            self.assertAllCloseAccordingToType(val0, val2)
            self.assertAllCloseAccordingToType(val1, val3)
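
The equivalence relies on learning_rate_power=-0.0: the accumulator term n**0 is constant, so the per-coordinate step size never decays, and (starting from zero weights, as the applyOptimizer helper presumably does) the FTRL closed form collapses to plain gradient descent. A quick sanity check of that identity:

lr, g = 3.0, 0.1
w_sgd, z = 0.0, 0.0
for _ in range(3):
    w_sgd -= lr * g  # vanilla gradient descent
    z += g           # FTRL linear term: sigma == 0 when learning_rate_power == 0
w_ftrl = -lr * z     # closed form with n**0 == 1 and l1 = l2 = 0
assert abs(w_sgd - w_ftrl) < 1e-12
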
Example #7
    def testMinimizeSparseResourceVariable(self):
        # TODO(tanzheny, omalleyt): Fix test in eager mode.
        for dtype in [tf.half, tf.float32, tf.float64]:
            with tf.Graph().as_default(), self.cached_session(use_gpu=True):
                var0 = tf.Variable([[1.0, 2.0]], dtype=dtype)
                x = tf.constant([[4.0], [5.0]], dtype=dtype)

                def loss():
                    pred = tf.matmul(
                        tf.compat.v1.nn.embedding_lookup([var0], [0]), x)  # pylint: disable=cell-var-from-loop
                    return pred * pred

                ftrl_op = ftrl.Ftrl(1.0).minimize(loss, var_list=[var0])
                self.evaluate(tf.compat.v1.global_variables_initializer())
                # Fetch params to validate initial values
                self.assertAllCloseAccordingToType([[1.0, 2.0]],
                                                   self.evaluate(var0))
                # Run 1 step of Ftrl
                ftrl_op.run()
                # Validate updated params
                self.assertAllCloseAccordingToType([[0, 1]],
                                                   self.evaluate(var0),
                                                   atol=0.01)
Example #8
    def testFtrlWithL1_L2_L2Shrinkage(self):
        """Test the new FTRL op with support for l2 shrinkage.

    The addition of this parameter which places a constant pressure on weights
    towards the origin causes the gradient descent trajectory to differ. The
    weights will tend to have smaller magnitudes with this parameter set.
    """
        # TODO(tanzheny, omalleyt): Fix test in eager mode.
        for dtype in [tf.half, tf.float32]:
            with tf.Graph().as_default(), self.cached_session(use_gpu=True):
                var0 = tf.Variable([1.0, 2.0], dtype=dtype)
                var1 = tf.Variable([4.0, 3.0], dtype=dtype)
                grads0 = tf.constant([0.1, 0.2], dtype=dtype)
                grads1 = tf.constant([0.01, 0.02], dtype=dtype)

                opt = ftrl.Ftrl(3.0,
                                initial_accumulator_value=0.1,
                                l1_regularization_strength=0.001,
                                l2_regularization_strength=2.0,
                                l2_shrinkage_regularization_strength=0.1)
                update = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                self.evaluate(tf.compat.v1.global_variables_initializer())

                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
                self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)

                # Run 10 steps of FTRL
                for _ in range(10):
                    update.run()

                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllCloseAccordingToType(
                    np.array([-0.22578995, -0.44345796]), v0_val)
                self.assertAllCloseAccordingToType(
                    np.array([-0.14378493, -0.13229476]), v1_val)
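
For var0, which starts from the same point and uses the same hyperparameters as in Example #4, adding l2_shrinkage_regularization_strength=0.1 moves the final weights from roughly [-0.241, -0.468] to [-0.226, -0.443], i.e. slightly smaller magnitudes. This is the behaviour the docstring describes and is consistent with the shrinkage-adjusted gradient sketched after Example #5.
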
Example #9
    def testFtrlWithL1_L2_L2ShrinkageSparse(self):
        """Tests the new FTRL op with support for l2 shrinkage on sparse grads."""
        # TODO(tanzheny, omalleyt): Fix test in eager mode.
        for dtype in [tf.half, tf.float32]:
            with tf.Graph().as_default(), self.cached_session(use_gpu=True):
                var0 = tf.Variable([[1.0], [2.0]], dtype=dtype)
                var1 = tf.Variable([[4.0], [3.0]], dtype=dtype)
                grads0 = tf.IndexedSlices(
                    tf.constant([0.1], shape=[1, 1], dtype=dtype),
                    tf.constant([0]), tf.constant([2, 1]))
                grads1 = tf.IndexedSlices(
                    tf.constant([0.02], shape=[1, 1], dtype=dtype),
                    tf.constant([1]), tf.constant([2, 1]))

                opt = ftrl.Ftrl(3.0,
                                initial_accumulator_value=0.1,
                                l1_regularization_strength=0.001,
                                l2_regularization_strength=2.0,
                                l2_shrinkage_regularization_strength=0.1)
                update = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                self.evaluate(tf.compat.v1.global_variables_initializer())

                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllCloseAccordingToType([[1.0], [2.0]], v0_val)
                self.assertAllCloseAccordingToType([[4.0], [3.0]], v1_val)

                # Run 10 steps of FTRL
                for _ in range(10):
                    update.run()

                v0_val, v1_val = self.evaluate([var0, var1])
                self.assertAllCloseAccordingToType([[-0.22578995], [2.]],
                                                   v0_val)
                self.assertAllCloseAccordingToType([[4.], [-0.13229476]],
                                                   v1_val)
]

adadelta_optimizer_keras_v2_fn = tf.__internal__.test.combinations.NamedObject(
    "AdadeltaKerasV2", lambda: adadelta_keras_v2.Adadelta(0.001))
adagrad_optimizer_keras_v2_fn = tf.__internal__.test.combinations.NamedObject(
    "AdagradKerasV2", lambda: adagrad_keras_v2.Adagrad(0.001))
adam_optimizer_keras_v2_fn = tf.__internal__.test.combinations.NamedObject(
    "AdamKerasV2", lambda: adam_keras_v2.Adam(0.001, epsilon=1.0))
adam_experimental_fn = tf.__internal__.test.combinations.NamedObject(
    "AdamExperimental", lambda: adam_experimental.Adam(0.001))
adamax_optimizer_keras_v2_fn = tf.__internal__.test.combinations.NamedObject(
    "AdamaxKerasV2", lambda: adamax_keras_v2.Adamax(0.001, epsilon=1.0))
nadam_optimizer_keras_v2_fn = tf.__internal__.test.combinations.NamedObject(
    "NadamKerasV2", lambda: nadam_keras_v2.Nadam(0.001, epsilon=1.0))
ftrl_optimizer_keras_v2_fn = tf.__internal__.test.combinations.NamedObject(
    "FtrlKerasV2", lambda: ftrl_keras_v2.Ftrl(0.001))
gradient_descent_optimizer_keras_v2_fn = tf.__internal__.test.combinations.NamedObject(
    "GradientDescentKerasV2", lambda: gradient_descent_keras_v2.SGD(0.001))
rmsprop_optimizer_keras_v2_fn = tf.__internal__.test.combinations.NamedObject(
    "RmsPropKerasV2", lambda: rmsprop_keras_v2.RMSprop(0.001))

# TODO(shiningsun): consider adding the other v2 optimizers
optimizers_v2 = [
    gradient_descent_optimizer_keras_v2_fn, adagrad_optimizer_keras_v2_fn
]

optimizers_v1_and_v2 = optimizers_v1 + optimizers_v2


def distributions_and_v1_optimizers():
    """A common set of combination with DistributionStrategies and Optimizers."""