Example #1
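Likely the TF1-era `FtrlOptimizer` variant of the method: each hyperparameter tensor is cast to the gradient's dtype inline at apply time.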
 def _resource_apply_sparse(self, grad, var, indices):
   accum = self.get_slot(var, "accum")
   linear = self.get_slot(var, "linear")
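   # Dispatch: the plain FTRL kernel suffices when L2 shrinkage is disabled;
   # the _v2 kernel takes the extra shrinkage-strength argument.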
   if self._l2_shrinkage_regularization_strength <= 0.0:
     return training_ops.resource_sparse_apply_ftrl(
         var.handle,
         accum.handle,
         linear.handle,
         grad,
         indices,
         math_ops.cast(self._learning_rate_tensor, grad.dtype),
         math_ops.cast(self._l1_regularization_strength_tensor, grad.dtype),
         math_ops.cast(self._l2_regularization_strength_tensor, grad.dtype),
         math_ops.cast(self._learning_rate_power_tensor, grad.dtype),
         use_locking=self._use_locking)
   else:
     return training_ops.resource_sparse_apply_ftrl_v2(
         var.handle,
         accum.handle,
         linear.handle,
         grad,
         indices,
         math_ops.cast(self._learning_rate_tensor, grad.dtype),
         math_ops.cast(self._l1_regularization_strength_tensor, grad.dtype),
         math_ops.cast(self._l2_regularization_strength_tensor, grad.dtype),
         math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
                       grad.dtype),
         math_ops.cast(self._learning_rate_power_tensor, grad.dtype),
         use_locking=self._use_locking)
Example #2
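A Keras `optimizer_v2` variant: instead of inline casts, hyperparameter tensors are precomputed per `(device, dtype)` pair and looked up through `apply_state`.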
    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        var_device, var_dtype = var.device, var.dtype.base_dtype
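        # Hyperparameter tensors are cached per (device, dtype) in apply_state;
        # if no cached entry exists, fall back to recomputing them.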
        coefficients = ((apply_state or {}).get((var_device, var_dtype))
                        or self._fallback_apply_state(var_device, var_dtype))

        accum = self.get_slot(var, 'accumulator')
        linear = self.get_slot(var, 'linear')

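        # Same dispatch as in Example #1, driven by the L2 shrinkage setting.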
        if self._l2_shrinkage_regularization_strength <= 0.0:
            return training_ops.resource_sparse_apply_ftrl(
                var.handle,
                accum.handle,
                linear.handle,
                grad,
                indices,
                coefficients['lr_t'],
                coefficients['l1_regularization_strength'],
                coefficients['l2_regularization_strength'],
                coefficients['learning_rate_power'],
                use_locking=self._use_locking)
        else:
            return training_ops.resource_sparse_apply_ftrl_v2(
                var.handle,
                accum.handle,
                linear.handle,
                grad,
                indices,
                coefficients['lr_t'],
                coefficients['l1_regularization_strength'],
                coefficients['l2_regularization_strength'],
                coefficients['l2_shrinkage_regularization_strength'],
                coefficients['learning_rate_power'],
                use_locking=self._use_locking)
Example #3
File: ftrl.py Project: aeverall/tensorflow
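An earlier `optimizer_v2`-style variant: the decayed learning rate comes from `_decayed_lr`, and the remaining hyperparameters are fetched with `_get_hyper` in the variable's dtype.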
 def _resource_apply_sparse(self, grad, var, indices):
   var_dtype = var.dtype.base_dtype
   lr_t = self._decayed_lr(var_dtype)
   learning_rate_power = self._get_hyper('learning_rate_power', var_dtype)
   l1_regularization_strength = self._get_hyper('l1_regularization_strength',
                                                var_dtype)
   l2_regularization_strength = self._get_hyper('l2_regularization_strength',
                                                var_dtype)
   accum = self.get_slot(var, 'accumulator')
   linear = self.get_slot(var, 'linear')
   if self._l2_shrinkage_regularization_strength <= 0.0:
     return training_ops.resource_sparse_apply_ftrl(
         var.handle,
         accum.handle,
         linear.handle,
         grad,
         indices,
         lr_t,
         l1_regularization_strength,
         l2_regularization_strength,
         learning_rate_power,
         use_locking=self._use_locking)
   else:
     return training_ops.resource_sparse_apply_ftrl_v2(
         var.handle,
         accum.handle,
         linear.handle,
         grad,
         indices,
         lr_t,
         l1_regularization_strength,
         l2_regularization_strength,
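          # The shrinkage strength is kept as a plain Python float (note the
          # <= 0.0 check above), so it is cast to the variable dtype inline.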
         math_ops.cast(self._l2_shrinkage_regularization_strength, var_dtype),
         learning_rate_power,
         use_locking=self._use_locking)
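For context, a minimal usage sketch (not taken from any of the snippets above, and assuming the public `tf.keras.optimizers.Ftrl` API): gradients produced through `tf.gather`-style lookups arrive as `tf.IndexedSlices`, which is what routes `apply_gradients` into `_resource_apply_sparse`. Setting `l2_shrinkage_regularization_strength > 0.0` is what selects the `_v2` kernel in each example.

import tensorflow as tf

# Sketch only: constructor arguments follow the public Keras Ftrl API.
opt = tf.keras.optimizers.Ftrl(
    learning_rate=0.1,
    learning_rate_power=-0.5,
    l1_regularization_strength=0.01,
    l2_regularization_strength=0.01,
    l2_shrinkage_regularization_strength=0.0)  # > 0.0 would pick the _v2 kernel

var = tf.Variable(tf.zeros([10, 4]))
with tf.GradientTape() as tape:
  # Gathering rows makes the gradient for `var` an IndexedSlices value,
  # so the update goes through _resource_apply_sparse.
  loss = tf.reduce_sum(tf.gather(var, [1, 3]))

grad = tape.gradient(loss, var)  # tf.IndexedSlices, not a dense tensor
opt.apply_gradients([(grad, var)])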