コード例 #1
0
ファイル: ftrl.py プロジェクト: WangZhibin/Super-TensorFlow
 def _apply_sparse(self, grad, var):
   """Apply a sparse FTRL(-Proximal) update to `var`.

   Dispatches to the V2 kernel (which additionally applies L2 shrinkage)
   only when a positive shrinkage strength was configured; otherwise the
   plain FTRL kernel is used.

   Args:
     grad: An `IndexedSlices` carrying the sparse gradient
       (`grad.values` / `grad.indices`).
     var: The variable being optimized; its "accum" and "linear" slots
       are read and updated by the kernel.

   Returns:
     The update op produced by the underlying `training_ops` kernel.
   """
   accum = self.get_slot(var, "accum")
   linear = self.get_slot(var, "linear")
   # Cast every hyperparameter tensor to the variable's dtype so the
   # kernel receives uniformly-typed inputs.
   dtype = var.dtype.base_dtype
   if self._l2_shrinkage_regularization_strength <= 0.0:
     return training_ops.sparse_apply_ftrl(
         var,
         accum,
         linear,
         grad.values,
         grad.indices,
         math_ops.cast(self._learning_rate_tensor, dtype),
         math_ops.cast(self._l1_regularization_strength_tensor, dtype),
         math_ops.cast(self._l2_regularization_strength_tensor, dtype),
         math_ops.cast(self._learning_rate_power_tensor, dtype),
         use_locking=self._use_locking)
   else:
     return training_ops.sparse_apply_ftrl_v2(
         var,
         accum,
         linear,
         grad.values,
         grad.indices,
         math_ops.cast(self._learning_rate_tensor, dtype),
         math_ops.cast(self._l1_regularization_strength_tensor, dtype),
         math_ops.cast(self._l2_regularization_strength_tensor, dtype),
         # FIX: previously cast to grad.dtype.base_dtype, inconsistent
         # with every other hyperparameter. A gradient has the same dtype
         # as its variable, so this is behavior-preserving in practice,
         # but the variable dtype is the uniform convention here.
         math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
                       dtype),
         math_ops.cast(self._learning_rate_power_tensor, dtype),
         use_locking=self._use_locking)
コード例 #2
0
ファイル: ftrl.py プロジェクト: 1000sprites/tensorflow
 def _apply_sparse(self, grad, var):
   """Sparse FTRL update for `var`, using the V2 kernel when L2
   shrinkage is enabled and the plain kernel otherwise.
   """
   var_dtype = var.dtype.base_dtype
   # Positional inputs shared by both kernels: variable, its two slot
   # variables, and the sparse gradient pieces.
   tensor_args = (
       var,
       self.get_slot(var, "accum"),
       self.get_slot(var, "linear"),
       grad.values,
       grad.indices,
   )
   # Hyperparameters common to both kernels, cast to the variable dtype.
   common_hparams = (
       math_ops.cast(self._learning_rate_tensor, var_dtype),
       math_ops.cast(self._l1_regularization_strength_tensor, var_dtype),
       math_ops.cast(self._l2_regularization_strength_tensor, var_dtype),
   )
   lr_power = math_ops.cast(self._learning_rate_power_tensor, var_dtype)
   if self._l2_shrinkage_regularization_strength > 0.0:
     # Shrinkage strength is cast to the gradient dtype, matching the
     # original implementation exactly.
     shrinkage = math_ops.cast(
         self._l2_shrinkage_regularization_strength_tensor,
         grad.dtype.base_dtype)
     return training_ops.sparse_apply_ftrl_v2(
         *tensor_args,
         *common_hparams,
         shrinkage,
         lr_power,
         use_locking=self._use_locking)
   return training_ops.sparse_apply_ftrl(
       *tensor_args,
       *common_hparams,
       lr_power,
       use_locking=self._use_locking)