Example #1
  def _apply_sparse(self, grad, var):
    accum = self.get_slot(var, "accum")
    linear = self.get_slot(var, "linear")
    if self._l2_shrinkage_regularization_strength <= 0.0:
      return training_ops.sparse_apply_ftrl(
          var,
          accum,
          linear,
          grad.values,
          grad.indices,
          math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
          math_ops.cast(self._l1_regularization_strength_tensor,
                        var.dtype.base_dtype),
          math_ops.cast(self._l2_regularization_strength_tensor,
                        var.dtype.base_dtype),
          math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
          use_locking=self._use_locking)
    else:
      return training_ops.sparse_apply_ftrl_v2(
          var,
          accum,
          linear,
          grad.values,
          grad.indices,
          math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
          math_ops.cast(self._l1_regularization_strength_tensor,
                        var.dtype.base_dtype),
          math_ops.cast(self._l2_regularization_strength_tensor,
                        var.dtype.base_dtype),
          math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
                        grad.dtype.base_dtype),
          math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
          use_locking=self._use_locking)
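The snippet above appears to be FtrlOptimizer._apply_sparse, which handles IndexedSlices gradients by dispatching to sparse_apply_ftrl, or to sparse_apply_ftrl_v2 when L2 shrinkage is enabled. Below is a minimal usage sketch of how this code path is reached through the public TF1 optimizer; the variable names, shapes, and hyperparameter values are illustrative assumptions, not part of the original file.

# Illustrative sketch (TF1 graph mode assumed); sparse gradients from an
# embedding lookup route through _apply_sparse above.
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

emb = tf.get_variable("emb", shape=[1000, 16])
rows = tf.nn.embedding_lookup(emb, [3, 7, 42])  # gradient w.r.t. emb is IndexedSlices
loss = tf.reduce_sum(tf.square(rows))

# l2_shrinkage == 0.0 takes the sparse_apply_ftrl branch;
# a positive value would dispatch to sparse_apply_ftrl_v2 instead.
opt = tf.train.FtrlOptimizer(
    learning_rate=0.1,
    l1_regularization_strength=0.001,
    l2_regularization_strength=0.001,
    l2_shrinkage_regularization_strength=0.0)
train_op = opt.minimize(loss)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(train_op)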
Example #2
  def _apply_sparse(self, grad, var):
    accum = self.get_slot(var, "accum")
    linear = self.get_slot(var, "linear")
    return training_ops.sparse_apply_ftrl(
        var, accum, linear, grad.values, grad.indices,
        self._learning_rate_tensor,
        self._l1_regularization_strength_tensor,
        self._l2_regularization_strength_tensor,
        self._learning_rate_power_tensor,
        use_locking=self._use_locking)
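This older revision skips the explicit dtype casts but calls the same kernel. For orientation, the per-row arithmetic that sparse_apply_ftrl applies to each row selected by grad.indices looks roughly like the NumPy sketch below; this is an illustrative approximation of the FTRL-Proximal update, and ftrl_row_update with its argument names is invented here, not TF API.

# Rough NumPy sketch of the per-row FTRL update (no L2 shrinkage).
import numpy as np

def ftrl_row_update(var, accum, linear, g, lr, l1, l2, lr_power=-0.5):
    new_accum = accum + g * g
    # sigma = (new_accum^{-lr_power} - accum^{-lr_power}) / lr
    sigma = (new_accum ** -lr_power - accum ** -lr_power) / lr
    linear = linear + g - sigma * var
    quadratic = new_accum ** -lr_power / lr + 2.0 * l2
    # Soft-threshold by l1: rows whose |linear| stays within l1 are zeroed.
    var = np.where(np.abs(linear) > l1,
                   (np.sign(linear) * l1 - linear) / quadratic,
                   0.0)
    return var, new_accum, linear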
Example #3
    def _testTypesForSparseFtrlMultiplyLinearByLr(self,
                                                  x,
                                                  y,
                                                  z,
                                                  lr,
                                                  grad,
                                                  indices,
                                                  l1=0.0,
                                                  l2=0.0,
                                                  lr_power=-0.5):
        self.setUp()
        with self.session(use_gpu=False):
            var = variables.VariableV1(x)
            accum = variables.VariableV1(y)
            linear = variables.VariableV1(z)
            self.evaluate(variables.global_variables_initializer())

            self.assertAllCloseAccordingToType(x, self.evaluate(var))
            sparse_apply_ftrl = (training_ops.sparse_apply_ftrl(
                var,
                accum,
                linear,
                grad,
                constant_op.constant(indices, self._toType(indices.dtype)),
                lr,
                l1,
                l2,
                lr_power=lr_power,
                multiply_linear_by_lr=True))
            out = self.evaluate(sparse_apply_ftrl)
            self.assertShapeEqual(out, sparse_apply_ftrl)

            for (i, index) in enumerate(indices):
                self.assertAllCloseAccordingToType(
                    x[index] - lr * grad[i] *
                    (y[index] + grad[i] * grad[i])**(lr_power),
                    self.evaluate(var)[index])
                self.assertAllCloseAccordingToType(
                    y[index] + grad[i] * grad[i],
                    self.evaluate(accum)[index])
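The assertion loop above checks closed-form expected rows that ignore l1 and l2 (i.e. the default l1 = l2 = 0.0 case). The helper below simply restates those asserted expressions outside the test; expected_sparse_ftrl_rows is not part of the original file and is purely illustrative.

# Restatement of the expected values asserted in the loop above.
import numpy as np

def expected_sparse_ftrl_rows(x, y, lr, grad, indices, lr_power=-0.5):
    var_rows, accum_rows = [], []
    for i, index in enumerate(indices):
        new_accum = y[index] + grad[i] * grad[i]
        # var row: x[index] - lr * grad[i] * new_accum ** lr_power
        var_rows.append(x[index] - lr * grad[i] * new_accum ** lr_power)
        accum_rows.append(new_accum)
    return np.array(var_rows), np.array(accum_rows)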
Example #4
    def _testTypesForSparseFtrl(self,
                                x,
                                y,
                                z,
                                lr,
                                grad,
                                indices,
                                l1=0.0,
                                l2=0.0,
                                lr_power=-0.5):
        self.setUp()
        with self.test_session(use_gpu=False):
            var = variables.Variable(x)
            accum = variables.Variable(y)
            linear = variables.Variable(z)
            variables.initialize_all_variables().run()

            self.assertAllCloseAccordingToType(x, var.eval())
            sparse_apply_ftrl = training_ops.sparse_apply_ftrl(
                var,
                accum,
                linear,
                grad,
                constant_op.constant(indices, self._toType(indices.dtype)),
                lr,
                l1,
                l2,
                lr_power=lr_power)
            out = sparse_apply_ftrl.eval()
            self.assertShapeEqual(out, sparse_apply_ftrl)

            for (i, index) in enumerate(indices):
                self.assertAllCloseAccordingToType(
                    x[index] - lr * grad[i] *
                    (y[index] + grad[i] * grad[i])**(lr_power),
                    var.eval()[index])
                self.assertAllCloseAccordingToType(
                    y[index] + grad[i] * grad[i],
                    accum.eval()[index])
Example #5
  def _testTypesForSparseFtrl(self,
                              x,
                              y,
                              z,
                              lr,
                              grad,
                              indices,
                              l1=0.0,
                              l2=0.0,
                              lr_power=-0.5):
    self.setUp()
    with self.session(use_gpu=False):
      var = variables.VariableV1(x)
      accum = variables.VariableV1(y)
      linear = variables.VariableV1(z)
      self.evaluate(variables.global_variables_initializer())

      self.assertAllCloseAccordingToType(x, self.evaluate(var))
      sparse_apply_ftrl = training_ops.sparse_apply_ftrl(
          var,
          accum,
          linear,
          grad,
          constant_op.constant(indices, self._toType(indices.dtype)),
          lr,
          l1,
          l2,
          lr_power=lr_power)
      out = self.evaluate(sparse_apply_ftrl)
      self.assertShapeEqual(out, sparse_apply_ftrl)

      for (i, index) in enumerate(indices):
        self.assertAllCloseAccordingToType(
            x[index] - lr * grad[i] * (y[index] + grad[i] * grad[i])**
            (lr_power),
            self.evaluate(var)[index])
        self.assertAllCloseAccordingToType(y[index] + grad[i] * grad[i],
                                           self.evaluate(accum)[index])
Example #6
  def _testTypesForSparseFtrl(self, x, y, z, lr, grad, indices, l1=0.0, l2=0.0,
                              lr_power=-0.5):
    self.setUp()
    with self.test_session(use_gpu=False):
      var = variables.Variable(x)
      accum = variables.Variable(y)
      linear = variables.Variable(z)
      variables.initialize_all_variables().run()

      self.assertAllEqual(x, var.eval())
      sparse_apply_ftrl = training_ops.sparse_apply_ftrl(
          var, accum, linear, grad,
          constant_op.constant(indices, self._toType(indices.dtype)),
          lr, l1, l2, lr_power=lr_power)
      out = sparse_apply_ftrl.eval()
      self.assertShapeEqual(out, sparse_apply_ftrl)

      for (i, index) in enumerate(indices):
        self.assertAllClose(
            x[index] - lr * grad[i] * (y[index] + grad[i] * grad[i]) ** (
                lr_power),
            var.eval()[index])
        self.assertAllEqual(y[index] + grad[i] * grad[i], accum.eval()[index])