def _resource_apply_sparse(self, grad, var, indices):
    # "accumulator" is the per-variable Adagrad slot created in _create_slots();
    # self._learning_rate_tensor is the learning rate converted to a tensor
    # elsewhere in the optimizer (conventionally in _prepare()).
    acc = self.get_slot(var, "accumulator")
    return training_ops.resource_sparse_apply_adagrad(
        var.handle,
        acc.handle,
        math_ops.cast(self._learning_rate_tensor, grad.dtype),
        grad,
        indices,
        use_locking=self._use_locking)
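For context, the example above relies on two other hooks of the tf.compat.v1.train.Optimizer base class: _create_slots(), which creates the "accumulator" slot for every variable, and _prepare(), which converts the learning rate into self._learning_rate_tensor. Below is a minimal sketch of those hooks, not the actual implementation: it assumes __init__ stored the raw learning rate in self._learning_rate, and a real Adagrad optimizer would seed the accumulator with initial_accumulator_value rather than zeros.

# Sketch only: assumes `from tensorflow.python.framework import ops` and that
# __init__ stored the raw learning rate in self._learning_rate.
def _create_slots(self, var_list):
    # One accumulator slot per trainable variable.
    for v in var_list:
        self._zeros_slot(v, "accumulator", self._name)

def _prepare(self):
    # Cache the learning rate as a tensor so the apply kernels can cast it.
    self._learning_rate_tensor = ops.convert_to_tensor(
        self._learning_rate, name="learning_rate")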
Example #2
def _resource_apply_sparse(self, grad, var, indices, state):
    # OptimizerV2-style variant: the slot and the learning-rate hyperparameter
    # are read from the per-graph `state` object rather than from `self`.
    acc = state.get_slot(var, "accumulator")
    return training_ops.resource_sparse_apply_adagrad(
        var.handle,
        acc.handle,
        state.get_hyper("learning_rate", var.dtype.base_dtype),
        grad,
        indices,
        use_locking=self._use_locking)
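Apart from the extra state argument, this is the same update as in the first example: the slot comes from state.get_slot() and the learning rate from state.get_hyper() instead of tensors cached on self. This signature appears to match the experimental OptimizerV2 base class (formerly tf.contrib.optimizer_v2), which passes per-graph state into the apply hooks.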
Example #3
def _resource_apply_sparse(self, grad, var, indices):
    acc = self.get_slot(var, "accumulator")
    if isinstance(var, embedding_variable_ops.EmbeddingVariable):
        # Embedding variables take a dedicated kernel that also receives the
        # current global step.
        global_step = training_util.get_or_create_global_step()
        return gen_ev_ops.ev_sparse_apply_adagrad(
            var.handle,
            acc.handle,
            math_ops.cast(self._learning_rate_tensor, grad.dtype),
            grad,
            indices,
            global_step,
            use_locking=self._use_locking)
    else:
        # Ordinary resource variables use the stock Adagrad kernel, as in the
        # first example.
        return training_ops.resource_sparse_apply_adagrad(
            var.handle,
            acc.handle,
            math_ops.cast(self._learning_rate_tensor, grad.dtype),
            grad,
            indices,
            use_locking=self._use_locking)
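The isinstance check on embedding_variable_ops.EmbeddingVariable suggests this last example comes from a TensorFlow fork with dynamically sized embedding variables (gen_ev_ops.ev_sparse_apply_adagrad and the extra global_step argument are specific to such a fork, e.g. DeepRec). Whichever variant is used, these hooks are not called directly: apply_gradients() routes a tf.IndexedSlices gradient, typically produced by an embedding lookup, to _resource_apply_sparse. A minimal end-to-end sketch of that path with the stock TF 1.x Adagrad optimizer (shapes and names are illustrative):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# A resource variable read through an embedding lookup yields an IndexedSlices
# gradient, which apply_gradients() dispatches to _resource_apply_sparse
# (after de-duplicating repeated indices).
emb = tf.get_variable("emb", shape=[100, 16], use_resource=True)
ids = tf.constant([3, 7, 42])
loss = tf.reduce_sum(tf.nn.embedding_lookup(emb, ids))

opt = tf.train.AdagradOptimizer(learning_rate=0.1)
train_op = opt.minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)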